net/qede: refactoring vport handling code
[dpdk.git] / drivers/net/qede/qede_ethdev.c
1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include "qede_ethdev.h"
10 #include <rte_alarm.h>
11 #include <rte_version.h>
12
13 /* Globals */
14 static const struct qed_eth_ops *qed_ops;
15 static int64_t timer_period = 1;
16
17 /* VXLAN tunnel classification mapping */
18 const struct _qede_vxlan_tunn_types {
19         uint16_t rte_filter_type;
20         enum ecore_filter_ucast_type qede_type;
21         enum ecore_tunn_clss qede_tunn_clss;
22         const char *string;
23 } qede_tunn_types[] = {
24         {
25                 ETH_TUNNEL_FILTER_OMAC,
26                 ECORE_FILTER_MAC,
27                 ECORE_TUNN_CLSS_MAC_VLAN,
28                 "outer-mac"
29         },
30         {
31                 ETH_TUNNEL_FILTER_TENID,
32                 ECORE_FILTER_VNI,
33                 ECORE_TUNN_CLSS_MAC_VNI,
34                 "vni"
35         },
36         {
37                 ETH_TUNNEL_FILTER_IMAC,
38                 ECORE_FILTER_INNER_MAC,
39                 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
40                 "inner-mac"
41         },
42         {
43                 ETH_TUNNEL_FILTER_IVLAN,
44                 ECORE_FILTER_INNER_VLAN,
45                 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
46                 "inner-vlan"
47         },
48         {
49                 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
50                 ECORE_FILTER_MAC_VNI_PAIR,
51                 ECORE_TUNN_CLSS_MAC_VNI,
52                 "outer-mac and vni"
53         },
54         {
55                 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
56                 ECORE_FILTER_UNUSED,
57                 MAX_ECORE_TUNN_CLSS,
58                 "outer-mac and inner-mac"
59         },
60         {
61                 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
62                 ECORE_FILTER_UNUSED,
63                 MAX_ECORE_TUNN_CLSS,
64                 "outer-mac and inner-vlan"
65         },
66         {
67                 ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
68                 ECORE_FILTER_INNER_MAC_VNI_PAIR,
69                 ECORE_TUNN_CLSS_INNER_MAC_VNI,
70                 "vni and inner-mac",
71         },
72         {
73                 ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
74                 ECORE_FILTER_UNUSED,
75                 MAX_ECORE_TUNN_CLSS,
76                 "vni and inner-vlan",
77         },
78         {
79                 ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
80                 ECORE_FILTER_INNER_PAIR,
81                 ECORE_TUNN_CLSS_INNER_MAC_VLAN,
82                 "inner-mac and inner-vlan",
83         },
84         {
85                 ETH_TUNNEL_FILTER_OIP,
86                 ECORE_FILTER_UNUSED,
87                 MAX_ECORE_TUNN_CLSS,
88                 "outer-IP"
89         },
90         {
91                 ETH_TUNNEL_FILTER_IIP,
92                 ECORE_FILTER_UNUSED,
93                 MAX_ECORE_TUNN_CLSS,
94                 "inner-IP"
95         },
96         {
97                 RTE_TUNNEL_FILTER_IMAC_IVLAN,
98                 ECORE_FILTER_UNUSED,
99                 MAX_ECORE_TUNN_CLSS,
100                 "IMAC_IVLAN"
101         },
102         {
103                 RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
104                 ECORE_FILTER_UNUSED,
105                 MAX_ECORE_TUNN_CLSS,
106                 "IMAC_IVLAN_TENID"
107         },
108         {
109                 RTE_TUNNEL_FILTER_IMAC_TENID,
110                 ECORE_FILTER_UNUSED,
111                 MAX_ECORE_TUNN_CLSS,
112                 "IMAC_TENID"
113         },
114         {
115                 RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
116                 ECORE_FILTER_UNUSED,
117                 MAX_ECORE_TUNN_CLSS,
118                 "OMAC_TENID_IMAC"
119         },
120 };
121
122 struct rte_qede_xstats_name_off {
123         char name[RTE_ETH_XSTATS_NAME_SIZE];
124         uint64_t offset;
125 };
126
127 static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
128         {"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)},
129         {"rx_multicast_bytes",
130                 offsetof(struct ecore_eth_stats, rx_mcast_bytes)},
131         {"rx_broadcast_bytes",
132                 offsetof(struct ecore_eth_stats, rx_bcast_bytes)},
133         {"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)},
134         {"rx_multicast_packets",
135                 offsetof(struct ecore_eth_stats, rx_mcast_pkts)},
136         {"rx_broadcast_packets",
137                 offsetof(struct ecore_eth_stats, rx_bcast_pkts)},
138
139         {"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)},
140         {"tx_multicast_bytes",
141                 offsetof(struct ecore_eth_stats, tx_mcast_bytes)},
142         {"tx_broadcast_bytes",
143                 offsetof(struct ecore_eth_stats, tx_bcast_bytes)},
144         {"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)},
145         {"tx_multicast_packets",
146                 offsetof(struct ecore_eth_stats, tx_mcast_pkts)},
147         {"tx_broadcast_packets",
148                 offsetof(struct ecore_eth_stats, tx_bcast_pkts)},
149
150         {"rx_64_byte_packets",
151                 offsetof(struct ecore_eth_stats, rx_64_byte_packets)},
152         {"rx_65_to_127_byte_packets",
153                 offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)},
154         {"rx_128_to_255_byte_packets",
155                 offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)},
156         {"rx_256_to_511_byte_packets",
157                 offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)},
158         {"rx_512_to_1023_byte_packets",
159                 offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)},
160         {"rx_1024_to_1518_byte_packets",
161                 offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)},
162         {"rx_1519_to_1522_byte_packets",
163                 offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)},
164         {"rx_1519_to_2047_byte_packets",
165                 offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)},
166         {"rx_2048_to_4095_byte_packets",
167                 offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)},
168         {"rx_4096_to_9216_byte_packets",
169                 offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)},
170         {"rx_9217_to_16383_byte_packets",
171                 offsetof(struct ecore_eth_stats,
172                          rx_9217_to_16383_byte_packets)},
173         {"tx_64_byte_packets",
174                 offsetof(struct ecore_eth_stats, tx_64_byte_packets)},
175         {"tx_65_to_127_byte_packets",
176                 offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)},
177         {"tx_128_to_255_byte_packets",
178                 offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)},
179         {"tx_256_to_511_byte_packets",
180                 offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)},
181         {"tx_512_to_1023_byte_packets",
182                 offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)},
183         {"tx_1024_to_1518_byte_packets",
184                 offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)},
185         {"trx_1519_to_1522_byte_packets",
186                 offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)},
187         {"tx_2048_to_4095_byte_packets",
188                 offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)},
189         {"tx_4096_to_9216_byte_packets",
190                 offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)},
191         {"tx_9217_to_16383_byte_packets",
192                 offsetof(struct ecore_eth_stats,
193                          tx_9217_to_16383_byte_packets)},
194
195         {"rx_mac_crtl_frames",
196                 offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)},
197         {"tx_mac_control_frames",
198                 offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)},
199         {"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)},
200         {"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)},
201         {"rx_priority_flow_control_frames",
202                 offsetof(struct ecore_eth_stats, rx_pfc_frames)},
203         {"tx_priority_flow_control_frames",
204                 offsetof(struct ecore_eth_stats, tx_pfc_frames)},
205
206         {"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)},
207         {"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)},
208         {"rx_carrier_errors",
209                 offsetof(struct ecore_eth_stats, rx_carrier_errors)},
210         {"rx_oversize_packet_errors",
211                 offsetof(struct ecore_eth_stats, rx_oversize_packets)},
212         {"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)},
213         {"rx_undersize_packet_errors",
214                 offsetof(struct ecore_eth_stats, rx_undersize_packets)},
215         {"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)},
216         {"rx_host_buffer_not_available",
217                 offsetof(struct ecore_eth_stats, no_buff_discards)},
218         /* Number of packets discarded because they are bigger than MTU */
219         {"rx_packet_too_big_discards",
220                 offsetof(struct ecore_eth_stats, packet_too_big_discard)},
221         {"rx_ttl_zero_discards",
222                 offsetof(struct ecore_eth_stats, ttl0_discard)},
223         {"rx_multi_function_tag_filter_discards",
224                 offsetof(struct ecore_eth_stats, mftag_filter_discards)},
225         {"rx_mac_filter_discards",
226                 offsetof(struct ecore_eth_stats, mac_filter_discards)},
227         {"rx_hw_buffer_truncates",
228                 offsetof(struct ecore_eth_stats, brb_truncates)},
229         {"rx_hw_buffer_discards",
230                 offsetof(struct ecore_eth_stats, brb_discards)},
231         {"tx_lpi_entry_count",
232                 offsetof(struct ecore_eth_stats, tx_lpi_entry_count)},
233         {"tx_total_collisions",
234                 offsetof(struct ecore_eth_stats, tx_total_collisions)},
235         {"tx_error_drop_packets",
236                 offsetof(struct ecore_eth_stats, tx_err_drop_pkts)},
237
238         {"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)},
239         {"rx_mac_unicast_packets",
240                 offsetof(struct ecore_eth_stats, rx_mac_uc_packets)},
241         {"rx_mac_multicast_packets",
242                 offsetof(struct ecore_eth_stats, rx_mac_mc_packets)},
243         {"rx_mac_broadcast_packets",
244                 offsetof(struct ecore_eth_stats, rx_mac_bc_packets)},
245         {"rx_mac_frames_ok",
246                 offsetof(struct ecore_eth_stats, rx_mac_frames_ok)},
247         {"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)},
248         {"tx_mac_unicast_packets",
249                 offsetof(struct ecore_eth_stats, tx_mac_uc_packets)},
250         {"tx_mac_multicast_packets",
251                 offsetof(struct ecore_eth_stats, tx_mac_mc_packets)},
252         {"tx_mac_broadcast_packets",
253                 offsetof(struct ecore_eth_stats, tx_mac_bc_packets)},
254
255         {"lro_coalesced_packets",
256                 offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)},
257         {"lro_coalesced_events",
258                 offsetof(struct ecore_eth_stats, tpa_coalesced_events)},
259         {"lro_aborts_num",
260                 offsetof(struct ecore_eth_stats, tpa_aborts_num)},
261         {"lro_not_coalesced_packets",
262                 offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)},
263         {"lro_coalesced_bytes",
264                 offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
265 };
266
267 static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
268         {"rx_q_segments",
269                 offsetof(struct qede_rx_queue, rx_segs)},
270         {"rx_q_hw_errors",
271                 offsetof(struct qede_rx_queue, rx_hw_errors)},
272         {"rx_q_allocation_errors",
273                 offsetof(struct qede_rx_queue, rx_alloc_errors)}
274 };
275
276 static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
277 {
278         ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
279 }
280
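/* Slowpath interrupt handler: runs the ecore slowpath DPC on the leading
 * hwfn and re-enables the interrupt line afterwards.
 */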
281 static void
282 qede_interrupt_handler(void *param)
283 {
284         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
285         struct qede_dev *qdev = eth_dev->data->dev_private;
286         struct ecore_dev *edev = &qdev->edev;
287
288         qede_interrupt_action(ECORE_LEADING_HWFN(edev));
289         if (rte_intr_enable(eth_dev->intr_handle))
290                 DP_ERR(edev, "rte_intr_enable failed\n");
291 }
292
293 static void
294 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
295 {
296         rte_memcpy(&qdev->dev_info, info, sizeof(*info));
297         qdev->num_tc = qdev->dev_info.num_tc;
298         qdev->ops = qed_ops;
299 }
300
301 #ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
302 static void qede_print_adapter_info(struct qede_dev *qdev)
303 {
304         struct ecore_dev *edev = &qdev->edev;
305         struct qed_dev_info *info = &qdev->dev_info.common;
306         static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
307         static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
308
309         DP_INFO(edev, "*********************************\n");
310         DP_INFO(edev, " DPDK version:%s\n", rte_version());
311         DP_INFO(edev, " Chip details : %s%d\n",
312                   ECORE_IS_BB(edev) ? "BB" : "AH",
313                   CHIP_REV_IS_A0(edev) ? 0 : 1);
314         snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
315                  info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
316         snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
317                  ver_str, QEDE_PMD_VERSION);
318         DP_INFO(edev, " Driver version : %s\n", drv_ver);
319         DP_INFO(edev, " Firmware version : %s\n", ver_str);
320
321         snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
322                  "%d.%d.%d.%d",
323                 (info->mfw_rev >> 24) & 0xff,
324                 (info->mfw_rev >> 16) & 0xff,
325                 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
326         DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
327         DP_INFO(edev, " Firmware file : %s\n", fw_file);
328         DP_INFO(edev, "*********************************\n");
329 }
330 #endif
331
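/* Send VPORT-START ramrod with the given MTU on every hwfn and reset the
 * vport statistics. FW placement offset is disabled for DPDK.
 */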
332 static int
333 qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
334 {
335         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
336         struct ecore_sp_vport_start_params params;
337         struct ecore_hwfn *p_hwfn;
338         int rc;
339         int i;
340
341         memset(&params, 0, sizeof(params));
342         params.vport_id = 0;
343         params.mtu = mtu;
344         /* @DPDK - Disable FW placement */
345         params.zero_placement_offset = 1;
346         for_each_hwfn(edev, i) {
347                 p_hwfn = &edev->hwfns[i];
348                 params.concrete_fid = p_hwfn->hw_info.concrete_fid;
349                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
350                 rc = ecore_sp_vport_start(p_hwfn, &params);
351                 if (rc != ECORE_SUCCESS) {
352                         DP_ERR(edev, "Start V-PORT failed %d\n", rc);
353                         return rc;
354                 }
355         }
356         ecore_reset_vport_stats(edev);
357         DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
358
359         return 0;
360 }
361
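/* Send VPORT-STOP ramrod for vport 0 on every hwfn */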
362 static int
363 qede_stop_vport(struct ecore_dev *edev)
364 {
365         struct ecore_hwfn *p_hwfn;
366         uint8_t vport_id;
367         int rc;
368         int i;
369
370         vport_id = 0;
371         for_each_hwfn(edev, i) {
372                 p_hwfn = &edev->hwfns[i];
373                 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
374                                          vport_id);
375                 if (rc != ECORE_SUCCESS) {
376                         DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
377                         return rc;
378                 }
379         }
380
381         return 0;
382 }
383
384 /* Activate or deactivate vport via vport-update */
385 int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
386 {
387         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
388         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
389         struct ecore_sp_vport_update_params params;
390         struct ecore_hwfn *p_hwfn;
391         uint8_t i;
392         int rc = -1;
393
394         memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
395         params.vport_id = 0;
396         params.update_vport_active_rx_flg = 1;
397         params.update_vport_active_tx_flg = 1;
398         params.vport_active_rx_flg = flg;
399         params.vport_active_tx_flg = flg;
400         for_each_hwfn(edev, i) {
401                 p_hwfn = &edev->hwfns[i];
402                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
403                 rc = ecore_sp_vport_update(p_hwfn, &params,
404                                 ECORE_SPQ_MODE_EBLOCK, NULL);
405                 if (rc != ECORE_SUCCESS) {
406                         DP_ERR(edev, "Failed to update vport\n");
407                         break;
408                 }
409         }
410         DP_INFO(edev, "vport %s\n", flg ? "activated" : "deactivated");
411         return rc;
412 }
413
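/* Fill the SGE/TPA parameters used by qede_enable_tpa() below. LRO is
 * configured in packet-split mode, i.e. one BD per TPA segment.
 */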
414 static void
415 qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
416                            uint16_t mtu, bool enable)
417 {
418         /* Enable LRO in split mode */
419         sge_tpa_params->tpa_ipv4_en_flg = enable;
420         sge_tpa_params->tpa_ipv6_en_flg = enable;
421         sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
422         sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
423         /* set if tpa enable changes */
424         sge_tpa_params->update_tpa_en_flg = 1;
425         /* set if tpa parameters should be handled */
426         sge_tpa_params->update_tpa_param_flg = enable;
427
428         sge_tpa_params->max_buffers_per_cqe = 20;
429         /* Enable TPA in split mode. In this mode each TPA segment
430          * starts on the new BD, so there is one BD per segment.
431          */
432         sge_tpa_params->tpa_pkt_split_flg = 1;
433         sge_tpa_params->tpa_hdr_data_split_flg = 0;
434         sge_tpa_params->tpa_gro_consistent_flg = 0;
435         sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
436         sge_tpa_params->tpa_max_size = 0x7FFF;
437         sge_tpa_params->tpa_min_size_to_start = mtu / 2;
438         sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
439 }
440
441 /* Enable/disable LRO via vport-update */
442 int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
443 {
444         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
445         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
446         struct ecore_sp_vport_update_params params;
447         struct ecore_sge_tpa_params tpa_params;
448         struct ecore_hwfn *p_hwfn;
449         int rc;
450         int i;
451
452         memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
453         memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
454         qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
455         params.vport_id = 0;
456         params.sge_tpa_params = &tpa_params;
457         for_each_hwfn(edev, i) {
458                 p_hwfn = &edev->hwfns[i];
459                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
460                 rc = ecore_sp_vport_update(p_hwfn, &params,
461                                 ECORE_SPQ_MODE_EBLOCK, NULL);
462                 if (rc != ECORE_SUCCESS) {
463                         DP_ERR(edev, "Failed to update LRO\n");
464                         return -1;
465                 }
466         }
467
468         DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
469
470         return 0;
471 }
472
473 /* Update MTU via vport-update without doing port restart.
474  * The vport must be deactivated before calling this API.
475  */
476 int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
477 {
478         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
479         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
480         struct ecore_sp_vport_update_params params;
481         struct ecore_hwfn *p_hwfn;
482         int rc;
483         int i;
484
485         memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
486         params.vport_id = 0;
487         params.mtu = mtu;
488
489         for_each_hwfn(edev, i) {
490                 p_hwfn = &edev->hwfns[i];
491                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
492                 rc = ecore_sp_vport_update(p_hwfn, &params,
493                                 ECORE_SPQ_MODE_EBLOCK, NULL);
494                 if (rc != ECORE_SUCCESS) {
495                         DP_ERR(edev, "Failed to update MTU\n");
496                         return -1;
497                 }
498         }
499         DP_INFO(edev, "MTU updated to %u\n", mtu);
500
501         return 0;
502 }
503
504 static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
505 {
506         memset(ucast, 0, sizeof(struct ecore_filter_ucast));
507         ucast->is_rx_filter = true;
508         ucast->is_tx_filter = true;
509         /* ucast->assert_on_error = true; - For debug */
510 }
511
512 static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn,
513                                     uint8_t clss, bool mode, bool mask)
514 {
515         memset(p_tunn, 0, sizeof(struct ecore_tunnel_info));
516         p_tunn->vxlan.b_update_mode = mode;
517         p_tunn->vxlan.b_mode_enabled = mask;
518         p_tunn->b_update_rx_cls = true;
519         p_tunn->b_update_tx_cls = true;
520         p_tunn->vxlan.tun_cls = clss;
521 }
522
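/* Add or remove an entry in the driver's shadow unicast MAC list
 * (uc_list_head). Returns -EEXIST for duplicates and -EINVAL when the
 * removal target is not found; the actual ecore filter command is issued
 * by the caller.
 */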
523 static int
524 qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
525                   bool add)
526 {
527         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
528         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
529         struct qede_ucast_entry *tmp = NULL;
530         struct qede_ucast_entry *u;
531         struct ether_addr *mac_addr;
532
533         mac_addr  = (struct ether_addr *)ucast->mac;
534         if (add) {
535                 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
536                         if ((memcmp(mac_addr, &tmp->mac,
537                                     ETHER_ADDR_LEN) == 0) &&
538                              ucast->vlan == tmp->vlan) {
539                                 DP_ERR(edev, "Unicast MAC is already added"
540                                        " with vlan = %u, vni = %u\n",
541                                        ucast->vlan,  ucast->vni);
542                                 return -EEXIST;
543                         }
544                 }
545                 u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
546                                RTE_CACHE_LINE_SIZE);
547                 if (!u) {
548                         DP_ERR(edev, "Did not allocate memory for ucast\n");
549                         return -ENOMEM;
550                 }
551                 ether_addr_copy(mac_addr, &u->mac);
552                 u->vlan = ucast->vlan;
553                 u->vni = ucast->vni;
554                 SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
555                 qdev->num_uc_addr++;
556         } else {
557                 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
558                         if ((memcmp(mac_addr, &tmp->mac,
559                                     ETHER_ADDR_LEN) == 0) &&
560                             ucast->vlan == tmp->vlan      &&
561                             ucast->vni == tmp->vni)
562                         break;
563                 }
564                 if (tmp == NULL) {
565                         DP_INFO(edev, "Unicast MAC is not found\n");
566                         return -EINVAL;
567                 }
568                 SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
569                 qdev->num_uc_addr--;
570         }
571
572         return 0;
573 }
574
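/* Add or remove an entry in the driver's shadow multicast MAC list
 * (mc_list_head); the ecore multicast filter command is issued by the
 * caller.
 */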
575 static int
576 qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
577                   bool add)
578 {
579         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
580         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
581         struct ether_addr *mac_addr;
582         struct qede_mcast_entry *tmp = NULL;
583         struct qede_mcast_entry *m;
584
585         mac_addr  = (struct ether_addr *)mcast->mac;
586         if (add) {
587                 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
588                         if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) {
589                                 DP_ERR(edev,
590                                         "Multicast MAC is already added\n");
591                                 return -EEXIST;
592                         }
593                 }
594                 m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
595                         RTE_CACHE_LINE_SIZE);
596                 if (!m) {
597                         DP_ERR(edev,
598                                 "Did not allocate memory for mcast\n");
599                         return -ENOMEM;
600                 }
601                 ether_addr_copy(mac_addr, &m->mac);
602                 SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
603                 qdev->num_mc_addr++;
604         } else {
605                 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
606                         if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0)
607                                 break;
608                 }
609                 if (tmp == NULL) {
610                         DP_INFO(edev, "Multicast mac is not found\n");
611                         return -EINVAL;
612                 }
613                 SLIST_REMOVE(&qdev->mc_list_head, tmp,
614                              qede_mcast_entry, list);
615                 qdev->num_mc_addr--;
616         }
617
618         return 0;
619 }
620
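/* Common MAC add/remove path: updates the relevant shadow list and then
 * issues the ecore multicast or unicast filter command. Fails with
 * -ECORE_INVAL once the corresponding filter table limit is reached.
 */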
621 static enum _ecore_status_t
622 qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
623                  bool add)
624 {
625         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
626         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
627         enum _ecore_status_t rc;
628         struct ecore_filter_mcast mcast;
629         struct qede_mcast_entry *tmp;
630         uint16_t j = 0;
631
632         /* Multicast */
633         if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) {
634                 if (add) {
635                         if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) {
636                                 DP_ERR(edev,
637                                        "Mcast filter table limit exceeded, "
638                                        "Please enable mcast promisc mode\n");
639                                 return -ECORE_INVAL;
640                         }
641                 }
642                 rc = qede_mcast_filter(eth_dev, ucast, add);
643                 if (rc == 0) {
644                         DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr);
645                         memset(&mcast, 0, sizeof(mcast));
646                         mcast.num_mc_addrs = qdev->num_mc_addr;
647                         mcast.opcode = ECORE_FILTER_ADD;
648                         SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
649                                 ether_addr_copy(&tmp->mac,
650                                         (struct ether_addr *)&mcast.mac[j]);
651                                 j++;
652                         }
653                         rc = ecore_filter_mcast_cmd(edev, &mcast,
654                                                     ECORE_SPQ_MODE_CB, NULL);
655                 }
656                 if (rc != ECORE_SUCCESS) {
657                         DP_ERR(edev, "Failed to add multicast filter"
658                                " rc = %d, op = %d\n", rc, add);
659                 }
660         } else { /* Unicast */
661                 if (add) {
662                         if (qdev->num_uc_addr >=
663                             qdev->dev_info.num_mac_filters) {
664                                 DP_ERR(edev,
665                                        "Ucast filter table limit exceeded,"
666                                        " Please enable promisc mode\n");
667                                 return -ECORE_INVAL;
668                         }
669                 }
670                 rc = qede_ucast_filter(eth_dev, ucast, add);
671                 if (rc == 0)
672                         rc = ecore_filter_ucast_cmd(edev, ucast,
673                                                     ECORE_SPQ_MODE_CB, NULL);
674                 if (rc != ECORE_SUCCESS) {
675                         DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
676                                rc, add);
677                 }
678         }
679
680         return rc;
681 }
682
683 static int
684 qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
685                   __rte_unused uint32_t index, __rte_unused uint32_t pool)
686 {
687         struct ecore_filter_ucast ucast;
688         int re;
689
690         qede_set_ucast_cmn_params(&ucast);
691         ucast.type = ECORE_FILTER_MAC;
692         ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
693         re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
694         return re;
695 }
696
697 static void
698 qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
699 {
700         struct qede_dev *qdev = eth_dev->data->dev_private;
701         struct ecore_dev *edev = &qdev->edev;
702         struct ecore_filter_ucast ucast;
703
704         PMD_INIT_FUNC_TRACE(edev);
705
706         if (index >= qdev->dev_info.num_mac_filters) {
707                 DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
708                        index, qdev->dev_info.num_mac_filters);
709                 return;
710         }
711
712         qede_set_ucast_cmn_params(&ucast);
713         ucast.opcode = ECORE_FILTER_REMOVE;
714         ucast.type = ECORE_FILTER_MAC;
715
716         /* Use the index maintained by rte */
717         ether_addr_copy(&eth_dev->data->mac_addrs[index],
718                         (struct ether_addr *)&ucast.mac);
719
720         ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
721 }
722
723 static void
724 qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
725 {
726         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
727         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
728
729         if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
730                                                mac_addr->addr_bytes)) {
731                 DP_ERR(edev, "Setting MAC address is not allowed\n");
732                 ether_addr_copy(&qdev->primary_mac,
733                                 &eth_dev->data->mac_addrs[0]);
734                 return;
735         }
736
737         qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
738 }
739
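/* Toggle accept-any-vlan via vport-update on every hwfn */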
740 static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
741 {
742         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
743         struct ecore_sp_vport_update_params params;
744         struct ecore_hwfn *p_hwfn;
745         uint8_t i;
746         int rc;
747
748         memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
749         params.vport_id = 0;
750         params.update_accept_any_vlan_flg = 1;
751         params.accept_any_vlan = flg;
752         for_each_hwfn(edev, i) {
753                 p_hwfn = &edev->hwfns[i];
754                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
755                 rc = ecore_sp_vport_update(p_hwfn, &params,
756                                 ECORE_SPQ_MODE_EBLOCK, NULL);
757                 if (rc != ECORE_SUCCESS) {
758                         DP_ERR(edev, "Failed to configure accept-any-vlan\n");
759                         return;
760                 }
761         }
762
763         DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
764 }
765
766 static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
767 {
768         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
769         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
770         struct ecore_sp_vport_update_params params;
771         struct ecore_hwfn *p_hwfn;
772         uint8_t i;
773         int rc;
774
775         memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
776         params.vport_id = 0;
777         params.update_inner_vlan_removal_flg = 1;
778         params.inner_vlan_removal_flg = flg;
779         for_each_hwfn(edev, i) {
780                 p_hwfn = &edev->hwfns[i];
781                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
782                 rc = ecore_sp_vport_update(p_hwfn, &params,
783                                 ECORE_SPQ_MODE_EBLOCK, NULL);
784                 if (rc != ECORE_SUCCESS) {
785                         DP_ERR(edev, "Failed to update vport\n");
786                         return -1;
787                 }
788         }
789
790         DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
791         return 0;
792 }
793
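/* Add or remove a VLAN filter. Once the HW VLAN filter table is full,
 * further additions fall back to enabling accept-any-vlan. A shadow list
 * (vlan_list_head) tracks the configured VLAN IDs.
 */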
794 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
795                                 uint16_t vlan_id, int on)
796 {
797         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
798         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
799         struct qed_dev_eth_info *dev_info = &qdev->dev_info;
800         struct qede_vlan_entry *tmp = NULL;
801         struct qede_vlan_entry *vlan;
802         struct ecore_filter_ucast ucast;
803         int rc;
804
805         if (on) {
806                 if (qdev->configured_vlans == dev_info->num_vlan_filters) {
807                         DP_ERR(edev, "Reached max VLAN filter limit"
808                                       " enabling accept_any_vlan\n");
809                         qede_config_accept_any_vlan(qdev, true);
810                         return 0;
811                 }
812
813                 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
814                         if (tmp->vid == vlan_id) {
815                                 DP_ERR(edev, "VLAN %u already configured\n",
816                                        vlan_id);
817                                 return -EEXIST;
818                         }
819                 }
820
821                 vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
822                                   RTE_CACHE_LINE_SIZE);
823
824                 if (!vlan) {
825                         DP_ERR(edev, "Did not allocate memory for VLAN\n");
826                         return -ENOMEM;
827                 }
828
829                 qede_set_ucast_cmn_params(&ucast);
830                 ucast.opcode = ECORE_FILTER_ADD;
831                 ucast.type = ECORE_FILTER_VLAN;
832                 ucast.vlan = vlan_id;
833                 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
834                                             NULL);
835                 if (rc != 0) {
836                         DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
837                                rc);
838                         rte_free(vlan);
839                 } else {
840                         vlan->vid = vlan_id;
841                         SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
842                         qdev->configured_vlans++;
843                         DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
844                                 vlan_id, qdev->configured_vlans);
845                 }
846         } else {
847                 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
848                         if (tmp->vid == vlan_id)
849                                 break;
850                 }
851
852                 if (!tmp) {
853                         if (qdev->configured_vlans == 0) {
854                                 DP_INFO(edev,
855                                         "No VLAN filters configured yet\n");
856                                 return 0;
857                         }
858
859                         DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
860                         return -EINVAL;
861                 }
862
863                 SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);
864
865                 qede_set_ucast_cmn_params(&ucast);
866                 ucast.opcode = ECORE_FILTER_REMOVE;
867                 ucast.type = ECORE_FILTER_VLAN;
868                 ucast.vlan = vlan_id;
869                 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
870                                             NULL);
871                 if (rc != 0) {
872                         DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
873                                vlan_id, rc);
874                 } else {
875                         qdev->configured_vlans--;
876                         DP_INFO(edev, "VLAN %u removed configured_vlans %u\n",
877                                 vlan_id, qdev->configured_vlans);
878                 }
879         }
880
881         return rc;
882 }
883
884 static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
885 {
886         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
887         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
888         struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
889
890         if (mask & ETH_VLAN_STRIP_MASK) {
891                 if (rxmode->hw_vlan_strip)
892                         (void)qede_vlan_stripping(eth_dev, 1);
893                 else
894                         (void)qede_vlan_stripping(eth_dev, 0);
895         }
896
897         if (mask & ETH_VLAN_FILTER_MASK) {
898                 /* VLAN filtering kicks in when a VLAN is added */
899                 if (rxmode->hw_vlan_filter) {
900                         qede_vlan_filter_set(eth_dev, 0, 1);
901                 } else {
902                         if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
903                                 DP_ERR(edev,
904                                   " Please remove existing VLAN filters"
905                                   " before disabling VLAN filtering\n");
906                                 /* Signal app that VLAN filtering is still
907                                  * enabled
908                                  */
909                                 rxmode->hw_vlan_filter = true;
910                         } else {
911                                 qede_vlan_filter_set(eth_dev, 0, 0);
912                         }
913                 }
914         }
915
916         if (mask & ETH_VLAN_EXTEND_MASK)
917                 DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
918                         " and classification is based on outer tag only\n");
919
920         DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
921                 mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
922 }
923
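/* Fill a default RSS hash key with pseudo-random words (seeded from the
 * current time); used when the application does not supply rss_key.
 */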
924 static void qede_prandom_bytes(uint32_t *buff)
925 {
926         uint8_t i;
927
928         srand((unsigned int)time(NULL));
929         for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
930                 buff[i] = rand();
931 }
932
933 int qede_config_rss(struct rte_eth_dev *eth_dev)
934 {
935         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
936 #ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
937         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
938 #endif
939         uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
940         struct rte_eth_rss_reta_entry64 reta_conf[2];
941         struct rte_eth_rss_conf rss_conf;
942         uint32_t i, id, pos, q;
943
944         rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
945         if (!rss_conf.rss_key) {
946                 DP_INFO(edev, "Applying driver default key\n");
947                 rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
948                 qede_prandom_bytes(&def_rss_key[0]);
949                 rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
950         }
951
952         /* Configure RSS hash */
953         if (qede_rss_hash_update(eth_dev, &rss_conf))
954                 return -EINVAL;
955
956         /* Configure default RETA */
957         memset(reta_conf, 0, sizeof(reta_conf));
958         for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
959                 reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
960
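        /* The default RETA spreads entries round-robin over the RSS queue
         * count: entry i maps to queue (i % QEDE_RSS_COUNT). For example,
         * assuming 4 Rx queues, the table repeats 0,1,2,3,0,1,...
         */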
961         for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
962                 id = i / RTE_RETA_GROUP_SIZE;
963                 pos = i % RTE_RETA_GROUP_SIZE;
964                 q = i % QEDE_RSS_COUNT(qdev);
965                 reta_conf[id].reta[pos] = q;
966         }
967         if (qede_rss_reta_update(eth_dev, &reta_conf[0],
968                                  ECORE_RSS_IND_TABLE_SIZE))
969                 return -EINVAL;
970
971         return 0;
972 }
973
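/* Device configure entry point. Validates the 100G (dual-hwfn) queue
 * requirements, performs scatter/CRC/checksum/LRO sanity checks, handles
 * the port-restart case by stopping the vport and releasing fastpath
 * resources, re-allocates fastpath resources, starts the vport (VF) or
 * updates the MTU (PF), verifies the RSS mode and flow director support,
 * and finally enables the default VLAN offloads.
 */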
974 static int qede_dev_configure(struct rte_eth_dev *eth_dev)
975 {
976         struct qede_dev *qdev = eth_dev->data->dev_private;
977         struct ecore_dev *edev = &qdev->edev;
978         struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
979         int rc;
980
981         PMD_INIT_FUNC_TRACE(edev);
982
983         /* Check requirements for 100G mode */
984         if (edev->num_hwfns > 1) {
985                 if (eth_dev->data->nb_rx_queues < 2 ||
986                     eth_dev->data->nb_tx_queues < 2) {
987                         DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
988                         return -EINVAL;
989                 }
990
991                 if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
992                     (eth_dev->data->nb_tx_queues % 2 != 0)) {
993                         DP_ERR(edev,
994                                   "100G mode needs even no. of RX/TX queues\n");
995                         return -EINVAL;
996                 }
997         }
998
999         /* Sanity checks and throw warnings */
1000         if (rxmode->enable_scatter == 1)
1001                 eth_dev->data->scattered_rx = 1;
1002
1003         if (!rxmode->hw_strip_crc)
1004                 DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
1005
1006         if (!rxmode->hw_ip_checksum)
1007                 DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
1008                               "in hw\n");
1009
1010         if (rxmode->enable_lro) {
1011                 qdev->enable_lro = true;
1012                 /* Enable scatter mode for LRO */
1013                 if (!rxmode->enable_scatter)
1014                         eth_dev->data->scattered_rx = 1;
1015         }
1016
1017         /* Check for the port restart case */
1018         if (qdev->state != QEDE_DEV_INIT) {
1019                 qede_stop_vport(edev);
1020                 qede_dealloc_fp_resc(eth_dev);
1021         }
1022
1023         qdev->fp_num_tx = eth_dev->data->nb_tx_queues;
1024         qdev->fp_num_rx = eth_dev->data->nb_rx_queues;
1025         qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx;
1026
1027         /* Fastpath status block should be initialized before sending
1028          * VPORT-START in the case of VF. Anyway, do it for both VF/PF.
1029          */
1030         rc = qede_alloc_fp_resc(qdev);
1031         if (rc != 0)
1032                 return rc;
1033
1034         /* VF's MTU has to be set using vport-start, whereas
1035          * PF's MTU can be updated via vport-update.
1036          */
1037         if (IS_VF(edev)) {
1038                 if (qede_start_vport(qdev, rxmode->max_rx_pkt_len))
1039                         return -1;
1040         } else {
1041                 if (qede_update_mtu(eth_dev, rxmode->max_rx_pkt_len))
1042                         return -1;
1043         }
1044
1045         qdev->mtu = rxmode->max_rx_pkt_len;
1046         qdev->new_mtu = qdev->mtu;
1047
1048         if (!(rxmode->mq_mode == ETH_MQ_RX_RSS ||
1049             rxmode->mq_mode == ETH_MQ_RX_NONE)) {
1050                 DP_ERR(edev, "Unsupported RSS mode\n");
1051                 qede_stop_vport(edev);
1052                 qede_dealloc_fp_resc(eth_dev);
1053                 return -EINVAL;
1054         }
1055
1056         /* Flow director mode check */
1057         rc = qede_check_fdir_support(eth_dev);
1058         if (rc) {
1059                 qede_stop_vport(edev);
1060                 qede_dealloc_fp_resc(eth_dev);
1061                 return -EINVAL;
1062         }
1063         SLIST_INIT(&qdev->fdir_info.fdir_list_head);
1064
1065         SLIST_INIT(&qdev->vlan_list_head);
1066
1067         /* Enable VLAN offloads by default */
1068         qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
1069                                        ETH_VLAN_FILTER_MASK |
1070                                        ETH_VLAN_EXTEND_MASK);
1071
1072         qdev->state = QEDE_DEV_CONFIG;
1073
1074         DP_INFO(edev, "Allocated RSS=%d TSS=%d (with CoS=%d)\n",
1075                 (int)QEDE_RSS_COUNT(qdev), (int)QEDE_TSS_COUNT(qdev),
1076                 qdev->num_tc);
1077
1078         return 0;
1079 }
1080
1081 /* Info about HW descriptor ring limitations */
1082 static const struct rte_eth_desc_lim qede_rx_desc_lim = {
1083         .nb_max = NUM_RX_BDS_MAX,
1084         .nb_min = 128,
1085         .nb_align = 128 /* lowest common multiple */
1086 };
1087
1088 static const struct rte_eth_desc_lim qede_tx_desc_lim = {
1089         .nb_max = NUM_TX_BDS_MAX,
1090         .nb_min = 256,
1091         .nb_align = 256,
1092         .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
1093         .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
1094 };
1095
1096 static void
1097 qede_dev_info_get(struct rte_eth_dev *eth_dev,
1098                   struct rte_eth_dev_info *dev_info)
1099 {
1100         struct qede_dev *qdev = eth_dev->data->dev_private;
1101         struct ecore_dev *edev = &qdev->edev;
1102         struct qed_link_output link;
1103         uint32_t speed_cap = 0;
1104
1105         PMD_INIT_FUNC_TRACE(edev);
1106
1107         dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1108         dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
1109         dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
1110         dev_info->rx_desc_lim = qede_rx_desc_lim;
1111         dev_info->tx_desc_lim = qede_tx_desc_lim;
1112
1113         if (IS_PF(edev))
1114                 dev_info->max_rx_queues = (uint16_t)RTE_MIN(
1115                         QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
1116         else
1117                 dev_info->max_rx_queues = (uint16_t)RTE_MIN(
1118                         QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
1119         dev_info->max_tx_queues = dev_info->max_rx_queues;
1120
1121         dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
1122         dev_info->max_vfs = 0;
1123         dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
1124         dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
1125         dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
1126
1127         dev_info->default_txconf = (struct rte_eth_txconf) {
1128                 .txq_flags = QEDE_TXQ_FLAGS,
1129         };
1130
1131         dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP  |
1132                                      DEV_RX_OFFLOAD_IPV4_CKSUM  |
1133                                      DEV_RX_OFFLOAD_UDP_CKSUM   |
1134                                      DEV_RX_OFFLOAD_TCP_CKSUM   |
1135                                      DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
1136                                      DEV_RX_OFFLOAD_TCP_LRO);
1137
1138         dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
1139                                      DEV_TX_OFFLOAD_IPV4_CKSUM  |
1140                                      DEV_TX_OFFLOAD_UDP_CKSUM   |
1141                                      DEV_TX_OFFLOAD_TCP_CKSUM   |
1142                                      DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1143                                      DEV_TX_OFFLOAD_TCP_TSO |
1144                                      DEV_TX_OFFLOAD_VXLAN_TNL_TSO);
1145
1146         memset(&link, 0, sizeof(struct qed_link_output));
1147         qdev->ops->common->get_link(edev, &link);
1148         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1149                 speed_cap |= ETH_LINK_SPEED_1G;
1150         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1151                 speed_cap |= ETH_LINK_SPEED_10G;
1152         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1153                 speed_cap |= ETH_LINK_SPEED_25G;
1154         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1155                 speed_cap |= ETH_LINK_SPEED_40G;
1156         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1157                 speed_cap |= ETH_LINK_SPEED_50G;
1158         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1159                 speed_cap |= ETH_LINK_SPEED_100G;
1160         dev_info->speed_capa = speed_cap;
1161 }
1162
1163 /* return 0 means link status changed, -1 means not changed */
1164 static int
1165 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
1166 {
1167         struct qede_dev *qdev = eth_dev->data->dev_private;
1168         struct ecore_dev *edev = &qdev->edev;
1169         uint16_t link_duplex;
1170         struct qed_link_output link;
1171         struct rte_eth_link *curr = &eth_dev->data->dev_link;
1172
1173         memset(&link, 0, sizeof(struct qed_link_output));
1174         qdev->ops->common->get_link(edev, &link);
1175
1176         /* Link Speed */
1177         curr->link_speed = link.speed;
1178
1179         /* Link Mode */
1180         switch (link.duplex) {
1181         case QEDE_DUPLEX_HALF:
1182                 link_duplex = ETH_LINK_HALF_DUPLEX;
1183                 break;
1184         case QEDE_DUPLEX_FULL:
1185                 link_duplex = ETH_LINK_FULL_DUPLEX;
1186                 break;
1187         case QEDE_DUPLEX_UNKNOWN:
1188         default:
1189                 link_duplex = -1;
1190         }
1191         curr->link_duplex = link_duplex;
1192
1193         /* Link Status */
1194         curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
1195
1196         /* AN */
1197         curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
1198                              ETH_LINK_AUTONEG : ETH_LINK_FIXED;
1199
1200         DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
1201                 curr->link_speed, curr->link_duplex,
1202                 curr->link_autoneg, curr->link_status);
1203
1204         /* return 0 means link status changed, -1 means not changed */
1205         return ((curr->link_status == link.link_up) ? -1 : 0);
1206 }
1207
1208 static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
1209 {
1210 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
1211         struct qede_dev *qdev = eth_dev->data->dev_private;
1212         struct ecore_dev *edev = &qdev->edev;
1213
1214         PMD_INIT_FUNC_TRACE(edev);
1215 #endif
1216
1217         enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
1218
1219         if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
1220                 type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1221
1222         qed_configure_filter_rx_mode(eth_dev, type);
1223 }
1224
1225 static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
1226 {
1227 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
1228         struct qede_dev *qdev = eth_dev->data->dev_private;
1229         struct ecore_dev *edev = &qdev->edev;
1230
1231         PMD_INIT_FUNC_TRACE(edev);
1232 #endif
1233
1234         if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
1235                 qed_configure_filter_rx_mode(eth_dev,
1236                                 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
1237         else
1238                 qed_configure_filter_rx_mode(eth_dev,
1239                                 QED_FILTER_RX_MODE_TYPE_REGULAR);
1240 }
1241
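/* Periodic alarm callback that services the slowpath status block of both
 * hwfns and re-arms itself every timer_period second(s). It is cancelled in
 * qede_dev_close() only when edev->num_hwfns > 1, so this polling path is
 * presumably used only on dual-hwfn (100G) adapters.
 */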
1242 static void qede_poll_sp_sb_cb(void *param)
1243 {
1244         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1245         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1246         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1247         int rc;
1248
1249         qede_interrupt_action(ECORE_LEADING_HWFN(edev));
1250         qede_interrupt_action(&edev->hwfns[1]);
1251
1252         rc = rte_eal_alarm_set(timer_period * US_PER_S,
1253                                qede_poll_sp_sb_cb,
1254                                (void *)eth_dev);
1255         if (rc != 0) {
1256                 DP_ERR(edev, "Unable to start periodic"
1257                              " timer rc %d\n", rc);
1258                 assert(false && "Unable to start periodic timer");
1259         }
1260 }
1261
1262 static void qede_dev_close(struct rte_eth_dev *eth_dev)
1263 {
1264         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1265         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1266         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1267
1268         PMD_INIT_FUNC_TRACE(edev);
1269
1270         qede_fdir_dealloc_resc(eth_dev);
1271
1272         /* dev_stop() shall clean up fp resources in hw without releasing
1273          * dma memories and sw structures, so that dev_start() can be called
1274          * by the app without reconfiguration. In dev_close(), however, all
1275          * resources can be released and the device brought up afresh.
1276          */
1277         if (qdev->state != QEDE_DEV_STOP)
1278                 qede_dev_stop(eth_dev);
1279         else
1280                 DP_INFO(edev, "Device is already stopped\n");
1281
1282         qede_stop_vport(edev);
1283
1284         qede_dealloc_fp_resc(eth_dev);
1285
1286         qdev->ops->common->slowpath_stop(edev);
1287
1288         qdev->ops->common->remove(edev);
1289
1290         rte_intr_disable(&pci_dev->intr_handle);
1291
1292         rte_intr_callback_unregister(&pci_dev->intr_handle,
1293                                      qede_interrupt_handler, (void *)eth_dev);
1294
1295         if (edev->num_hwfns > 1)
1296                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
1297
1298         qdev->state = QEDE_DEV_INIT; /* Go back to init state */
1299 }
1300
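/* Aggregate vport statistics into rte_eth_stats and fill the per-queue
 * packet/error counters, clamped to RTE_ETHDEV_QUEUE_STAT_CNTRS.
 */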
1301 static void
1302 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
1303 {
1304         struct qede_dev *qdev = eth_dev->data->dev_private;
1305         struct ecore_dev *edev = &qdev->edev;
1306         struct ecore_eth_stats stats;
1307         unsigned int i = 0, j = 0, qid;
1308         unsigned int rxq_stat_cntrs, txq_stat_cntrs;
1309         struct qede_tx_queue *txq;
1310
1311         qdev->ops->get_vport_stats(edev, &stats);
1312
1313         /* RX Stats */
1314         eth_stats->ipackets = stats.rx_ucast_pkts +
1315             stats.rx_mcast_pkts + stats.rx_bcast_pkts;
1316
1317         eth_stats->ibytes = stats.rx_ucast_bytes +
1318             stats.rx_mcast_bytes + stats.rx_bcast_bytes;
1319
1320         eth_stats->ierrors = stats.rx_crc_errors +
1321             stats.rx_align_errors +
1322             stats.rx_carrier_errors +
1323             stats.rx_oversize_packets +
1324             stats.rx_jabbers + stats.rx_undersize_packets;
1325
1326         eth_stats->rx_nombuf = stats.no_buff_discards;
1327
1328         eth_stats->imissed = stats.mftag_filter_discards +
1329             stats.mac_filter_discards +
1330             stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;
1331
1332         /* TX stats */
1333         eth_stats->opackets = stats.tx_ucast_pkts +
1334             stats.tx_mcast_pkts + stats.tx_bcast_pkts;
1335
1336         eth_stats->obytes = stats.tx_ucast_bytes +
1337             stats.tx_mcast_bytes + stats.tx_bcast_bytes;
1338
1339         eth_stats->oerrors = stats.tx_err_drop_pkts;
1340
1341         /* Queue stats */
1342         rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1343                                RTE_ETHDEV_QUEUE_STAT_CNTRS);
1344         txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
1345                                RTE_ETHDEV_QUEUE_STAT_CNTRS);
1346         if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
1347             (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
1348                 DP_VERBOSE(edev, ECORE_MSG_DEBUG,
1349                        "Not all the queue stats will be displayed. Set"
1350                        " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
1351                        " appropriately and retry.\n");
1352
1353         for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
1354                 if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
1355                         eth_stats->q_ipackets[i] =
1356                                 *(uint64_t *)(
1357                                         ((char *)(qdev->fp_array[(qid)].rxq)) +
1358                                         offsetof(struct qede_rx_queue,
1359                                         rcv_pkts));
1360                         eth_stats->q_errors[i] =
1361                                 *(uint64_t *)(
1362                                         ((char *)(qdev->fp_array[(qid)].rxq)) +
1363                                         offsetof(struct qede_rx_queue,
1364                                         rx_hw_errors)) +
1365                                 *(uint64_t *)(
1366                                         ((char *)(qdev->fp_array[(qid)].rxq)) +
1367                                         offsetof(struct qede_rx_queue,
1368                                         rx_alloc_errors));
1369                         i++;
1370                 }
1371                 if (i == rxq_stat_cntrs)
1372                         break;
1373         }
1374
1375         for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
1376                 if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
1377                         txq = qdev->fp_array[(qid)].txqs[0];
1378                         eth_stats->q_opackets[j] =
1379                                 *((uint64_t *)(uintptr_t)
1380                                         (((uint64_t)(uintptr_t)(txq)) +
1381                                          offsetof(struct qede_tx_queue,
1382                                                   xmit_pkts)));
1383                         j++;
1384                 }
1385                 if (j == txq_stat_cntrs)
1386                         break;
1387         }
1388 }
1389
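/* Total number of extended stats: driver-wide entries plus per-Rx-queue entries */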
1390 static unsigned
1391 qede_get_xstats_count(struct qede_dev *qdev)
{
1392         return RTE_DIM(qede_xstats_strings) +
1393                 (RTE_DIM(qede_rxq_xstats_strings) *
1394                  RTE_MIN(QEDE_RSS_COUNT(qdev),
1395                          RTE_ETHDEV_QUEUE_STAT_CNTRS));
1396 }
1397
1398 static int
1399 qede_get_xstats_names(struct rte_eth_dev *dev,
1400                       struct rte_eth_xstat_name *xstats_names,
1401                       __rte_unused unsigned int limit)
1402 {
1403         struct qede_dev *qdev = dev->data->dev_private;
1404         const unsigned int stat_cnt = qede_get_xstats_count(qdev);
1405         unsigned int i, qid, stat_idx = 0;
1406         unsigned int rxq_stat_cntrs;
1407
1408         if (xstats_names != NULL) {
1409                 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
1410                         snprintf(xstats_names[stat_idx].name,
1411                                 sizeof(xstats_names[stat_idx].name),
1412                                 "%s",
1413                                 qede_xstats_strings[i].name);
1414                         stat_idx++;
1415                 }
1416
1417                 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1418                                          RTE_ETHDEV_QUEUE_STAT_CNTRS);
1419                 for (qid = 0; qid < rxq_stat_cntrs; qid++) {
1420                         for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
1421                                 snprintf(xstats_names[stat_idx].name,
1422                                         sizeof(xstats_names[stat_idx].name),
1423                                         "%.4s%d%s",
1424                                         qede_rxq_xstats_strings[i].name, qid,
1425                                         qede_rxq_xstats_strings[i].name + 4);
1426                                 stat_idx++;
1427                         }
1428                 }
1429         }
1430
1431         return stat_cnt;
1432 }
1433
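/* xstats_get() callback: copy the vport and per-Rx-queue extended statistics */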
1434 static int
1435 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1436                 unsigned int n)
1437 {
1438         struct qede_dev *qdev = dev->data->dev_private;
1439         struct ecore_dev *edev = &qdev->edev;
1440         struct ecore_eth_stats stats;
1441         const unsigned int num = qede_get_xstats_count(qdev);
1442         unsigned int i, qid, stat_idx = 0;
1443         unsigned int rxq_stat_cntrs;
1444
1445         if (n < num)
1446                 return num;
1447
1448         qdev->ops->get_vport_stats(edev, &stats);
1449
1450         for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
1451                 xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
1452                                              qede_xstats_strings[i].offset);
1453                 xstats[stat_idx].id = stat_idx;
1454                 stat_idx++;
1455         }
1456
1457         rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1458                                  RTE_ETHDEV_QUEUE_STAT_CNTRS);
1459         for (qid = 0; qid < rxq_stat_cntrs; qid++) {
1460                 if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
1461                         for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
1462                                 xstats[stat_idx].value = *(uint64_t *)(
1463                                         ((char *)(qdev->fp_array[(qid)].rxq)) +
1464                                          qede_rxq_xstats_strings[i].offset);
1465                                 xstats[stat_idx].id = stat_idx;
1466                                 stat_idx++;
1467                         }
1468                 }
1469         }
1470
1471         return stat_idx;
1472 }
1473
1474 static void
1475 qede_reset_xstats(struct rte_eth_dev *dev)
1476 {
1477         struct qede_dev *qdev = dev->data->dev_private;
1478         struct ecore_dev *edev = &qdev->edev;
1479
1480         ecore_reset_vport_stats(edev);
1481 }
1482
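/* Bring the physical link up or down through the common set_link() op */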
1483 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
1484 {
1485         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1486         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1487         struct qed_link_params link_params;
1488         int rc;
1489
1490         DP_INFO(edev, "setting link state %d\n", link_up);
1491         memset(&link_params, 0, sizeof(link_params));
1492         link_params.link_up = link_up;
1493         rc = qdev->ops->common->set_link(edev, &link_params);
1494         if (rc != ECORE_SUCCESS)
1495                 DP_ERR(edev, "Unable to set link state %d\n", link_up);
1496
1497         return rc;
1498 }
1499
1500 static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
1501 {
1502         return qede_dev_set_link_state(eth_dev, true);
1503 }
1504
1505 static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
1506 {
1507         return qede_dev_set_link_state(eth_dev, false);
1508 }
1509
1510 static void qede_reset_stats(struct rte_eth_dev *eth_dev)
1511 {
1512         struct qede_dev *qdev = eth_dev->data->dev_private;
1513         struct ecore_dev *edev = &qdev->edev;
1514
1515         ecore_reset_vport_stats(edev);
1516 }
1517
1518 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
1519 {
1520         enum qed_filter_rx_mode_type type =
1521             QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1522
1523         if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1524                 type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
1525
1526         qed_configure_filter_rx_mode(eth_dev, type);
1527 }
1528
1529 static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
1530 {
1531         if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1532                 qed_configure_filter_rx_mode(eth_dev,
1533                                 QED_FILTER_RX_MODE_TYPE_PROMISC);
1534         else
1535                 qed_configure_filter_rx_mode(eth_dev,
1536                                 QED_FILTER_RX_MODE_TYPE_REGULAR);
1537 }
1538
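/* flow_ctrl_set() callback: translate rte_eth_fc_conf into link pause
 * configuration and re-request the link with the new settings.
 */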
1539 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
1540                               struct rte_eth_fc_conf *fc_conf)
1541 {
1542         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1543         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1544         struct qed_link_output current_link;
1545         struct qed_link_params params;
1546
1547         memset(&current_link, 0, sizeof(current_link));
1548         qdev->ops->common->get_link(edev, &current_link);
1549
1550         memset(&params, 0, sizeof(params));
1551         params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
1552         if (fc_conf->autoneg) {
1553                 if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
1554                         DP_ERR(edev, "Autoneg not supported\n");
1555                         return -EINVAL;
1556                 }
1557                 params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
1558         }
1559
1560         /* Pause is assumed to be supported (SUPPORTED_Pause) */
1561         if (fc_conf->mode == RTE_FC_FULL)
1562                 params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
1563                                         QED_LINK_PAUSE_RX_ENABLE);
1564         if (fc_conf->mode == RTE_FC_TX_PAUSE)
1565                 params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
1566         if (fc_conf->mode == RTE_FC_RX_PAUSE)
1567                 params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
1568
1569         params.link_up = true;
1570         (void)qdev->ops->common->set_link(edev, &params);
1571
1572         return 0;
1573 }
1574
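/* flow_ctrl_get() callback: derive the current autoneg/pause mode from the
 * link output.
 */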
1575 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
1576                               struct rte_eth_fc_conf *fc_conf)
1577 {
1578         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1579         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1580         struct qed_link_output current_link;
1581
1582         memset(&current_link, 0, sizeof(current_link));
1583         qdev->ops->common->get_link(edev, &current_link);
1584
1585         if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
1586                 fc_conf->autoneg = true;
1587
1588         if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
1589                                          QED_LINK_PAUSE_TX_ENABLE))
1590                 fc_conf->mode = RTE_FC_FULL;
1591         else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
1592                 fc_conf->mode = RTE_FC_RX_PAUSE;
1593         else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
1594                 fc_conf->mode = RTE_FC_TX_PAUSE;
1595         else
1596                 fc_conf->mode = RTE_FC_NONE;
1597
1598         return 0;
1599 }
1600
1601 static const uint32_t *
1602 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
1603 {
1604         static const uint32_t ptypes[] = {
1605                 RTE_PTYPE_L3_IPV4,
1606                 RTE_PTYPE_L3_IPV6,
1607                 RTE_PTYPE_UNKNOWN
1608         };
1609
1610         if (eth_dev->rx_pkt_burst == qede_recv_pkts)
1611                 return ptypes;
1612
1613         return NULL;
1614 }
1615
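/* Convert rte_eth RSS hash-function flags into ecore RSS capability bits */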
1616 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
1617 {
1618         *rss_caps = 0;
1619         *rss_caps |= (hf & ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
1620         *rss_caps |= (hf & ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
1621         *rss_caps |= (hf & ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
1622         *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
1623         *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
1624         *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
1625         *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? ECORE_RSS_IPV4_UDP : 0;
1626         *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? ECORE_RSS_IPV6_UDP : 0;
1627 }
1628
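/* rss_hash_update() callback: program RSS capabilities, hash key and enable
 * flag via a vport-update on every hwfn, and cache the configuration locally
 * for rss_hash_conf_get().
 */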
1629 int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
1630                          struct rte_eth_rss_conf *rss_conf)
1631 {
1632         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1633         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1634         struct ecore_sp_vport_update_params vport_update_params;
1635         struct ecore_rss_params rss_params;
1636         struct ecore_hwfn *p_hwfn;
1637         uint32_t *key = (uint32_t *)rss_conf->rss_key;
1638         uint64_t hf = rss_conf->rss_hf;
1639         uint8_t len = rss_conf->rss_key_len;
1640         uint8_t idx;
1641         uint8_t i;
1642         int rc;
1643
1644         memset(&vport_update_params, 0, sizeof(vport_update_params));
1645         memset(&rss_params, 0, sizeof(rss_params));
1646
1647         DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
1648                 (unsigned long)hf, len, key);
1649
1650         if (hf != 0) {
1651                 /* Enabling RSS */
1652                 DP_INFO(edev, "Enabling rss\n");
1653
1654                 /* RSS caps */
1655                 qede_init_rss_caps(&rss_params.rss_caps, hf);
1656                 rss_params.update_rss_capabilities = 1;
1657
1658                 /* RSS hash key */
1659                 if (key) {
1660                         if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
1661                                 DP_ERR(edev, "RSS key length exceeds limit\n");
1662                                 return -EINVAL;
1663                         }
1664                         DP_INFO(edev, "Applying user supplied hash key\n");
1665                         rss_params.update_rss_key = 1;
1666                         memcpy(&rss_params.rss_key, key, len);
1667                 }
1668                 rss_params.rss_enable = 1;
1669         }
1670
1671         rss_params.update_rss_config = 1;
1672         /* Indirection table size is 2^7 = 128 entries; set along with the capabilities */
1673         rss_params.rss_table_size_log = 7;
1674         vport_update_params.vport_id = 0;
1675         /* pass the L2 handles instead of qids */
1676         for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
1677                 idx = qdev->rss_ind_table[i];
1678                 rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
1679         }
1680         vport_update_params.rss_params = &rss_params;
1681
1682         for_each_hwfn(edev, i) {
1683                 p_hwfn = &edev->hwfns[i];
1684                 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
1685                 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
1686                                            ECORE_SPQ_MODE_EBLOCK, NULL);
1687                 if (rc) {
1688                         DP_ERR(edev, "vport-update for RSS failed\n");
1689                         return rc;
1690                 }
1691         }
1692         qdev->rss_enable = rss_params.rss_enable;
1693
1694         /* Update local structure for hash query */
1695         qdev->rss_conf.rss_hf = hf;
1696         qdev->rss_conf.rss_key_len = len;
1697         if (qdev->rss_enable) {
1698                 if (qdev->rss_conf.rss_key == NULL) {
1699                         qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
1700                         if (qdev->rss_conf.rss_key == NULL) {
1701                                 DP_ERR(edev, "No memory to store RSS key\n");
1702                                 return -ENOMEM;
1703                         }
1704                 }
1705                 if (key && len) {
1706                         DP_INFO(edev, "Storing RSS key\n");
1707                         memcpy(qdev->rss_conf.rss_key, key, len);
1708                 }
1709         } else if (!qdev->rss_enable && len == 0) {
1710                 if (qdev->rss_conf.rss_key) {
1711                         free(qdev->rss_conf.rss_key);
1712                         qdev->rss_conf.rss_key = NULL;
1713                         DP_INFO(edev, "Free RSS key\n");
1714                 }
1715         }
1716
1717         return 0;
1718 }
1719
1720 static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
1721                            struct rte_eth_rss_conf *rss_conf)
1722 {
1723         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1724
1725         rss_conf->rss_hf = qdev->rss_conf.rss_hf;
1726         rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;
1727
1728         if (rss_conf->rss_key && qdev->rss_conf.rss_key)
1729                 memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
1730                        rss_conf->rss_key_len);
1731         return 0;
1732 }
1733
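/* For CMT (100G) devices, split the indirection table between the two engines
 * and decide whether RSS is actually required; returns the resulting RSS mode.
 */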
1734 static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
1735                                     struct ecore_rss_params *rss)
1736 {
1737         int i, fn;
1738         bool rss_mode = 1; /* enable */
1739         struct ecore_queue_cid *cid;
1740         struct ecore_rss_params *t_rss;
1741
1742         /* In the regular scenario we'd simply take the input handlers.
1743          * But in CMT we have to split the handlers according to the
1744          * engine they were configured on, and then check whether RSS
1745          * is really required, since two queues on a CMT device do not
1746          * require RSS.
1747          */
1748
1749         /* CMT should be round-robin */
1750         for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
1751                 cid = rss->rss_ind_table[i];
1752
1753                 if (cid->p_owner == ECORE_LEADING_HWFN(edev))
1754                         t_rss = &rss[0];
1755                 else
1756                         t_rss = &rss[1];
1757
1758                 t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
1759         }
1760
1761         t_rss = &rss[1];
1762         t_rss->update_rss_ind_table = 1;
1763         t_rss->rss_table_size_log = 7;
1764         t_rss->update_rss_config = 1;
1765
1766         /* Make sure RSS is actually required */
1767         for_each_hwfn(edev, fn) {
1768                 for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
1769                      i++) {
1770                         if (rss[fn].rss_ind_table[i] !=
1771                             rss[fn].rss_ind_table[0])
1772                                 break;
1773                 }
1774
1775                 if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
1776                         DP_INFO(edev,
1777                                 "CMT - 1 queue per-hwfn; Disabling RSS\n");
1778                         rss_mode = 0;
1779                         goto out;
1780                 }
1781         }
1782
1783 out:
1784         t_rss->rss_enable = rss_mode;
1785
1786         return rss_mode;
1787 }
1788
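/* reta_update() callback: translate RETA entries into rxq handles, fix them
 * up for CMT devices and apply them through a vport-update on every hwfn.
 */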
1789 int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
1790                          struct rte_eth_rss_reta_entry64 *reta_conf,
1791                          uint16_t reta_size)
1792 {
1793         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1794         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1795         struct ecore_sp_vport_update_params vport_update_params;
1796         struct ecore_rss_params *params;
1797         struct ecore_hwfn *p_hwfn;
1798         uint16_t i, idx, shift;
1799         uint8_t entry;
1800         int rc = 0;
1801
1802         if (reta_size > ETH_RSS_RETA_SIZE_128) {
1803                 DP_ERR(edev, "reta_size %d is not supported by hardware\n",
1804                        reta_size);
1805                 return -EINVAL;
1806         }
1807
1808         memset(&vport_update_params, 0, sizeof(vport_update_params));
1809         params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
1810                              RTE_CACHE_LINE_SIZE);
             if (params == NULL) {
                     DP_ERR(edev, "failed to allocate memory\n");
                     return -ENOMEM;
             }
1811
1812         for (i = 0; i < reta_size; i++) {
1813                 idx = i / RTE_RETA_GROUP_SIZE;
1814                 shift = i % RTE_RETA_GROUP_SIZE;
1815                 if (reta_conf[idx].mask & (1ULL << shift)) {
1816                         entry = reta_conf[idx].reta[shift];
1817                         /* Pass rxq handles to ecore */
1818                         params->rss_ind_table[i] =
1819                                         qdev->fp_array[entry].rxq->handle;
1820                         /* Update the local copy for RETA query command */
1821                         qdev->rss_ind_table[i] = entry;
1822                 }
1823         }
1824
1825         params->update_rss_ind_table = 1;
1826         params->rss_table_size_log = 7;
1827         params->update_rss_config = 1;
1828
1829         /* Fix up RETA for CMT mode device */
1830         if (edev->num_hwfns > 1)
1831                 qdev->rss_enable = qede_update_rss_parm_cmt(edev,
1832                                                             params);
1833         vport_update_params.vport_id = 0;
1834         /* Use the current value of rss_enable */
1835         params->rss_enable = qdev->rss_enable;
1836         vport_update_params.rss_params = params;
1837
1838         for_each_hwfn(edev, i) {
1839                 p_hwfn = &edev->hwfns[i];
1840                 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
1841                 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
1842                                            ECORE_SPQ_MODE_EBLOCK, NULL);
1843                 if (rc) {
1844                         DP_ERR(edev, "vport-update for RSS failed\n");
1845                         goto out;
1846                 }
1847         }
1848
1849 out:
1850         rte_free(params);
1851         return rc;
1852 }
1853
1854 static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
1855                                struct rte_eth_rss_reta_entry64 *reta_conf,
1856                                uint16_t reta_size)
1857 {
1858         struct qede_dev *qdev = eth_dev->data->dev_private;
1859         struct ecore_dev *edev = &qdev->edev;
1860         uint16_t i, idx, shift;
1861         uint8_t entry;
1862
1863         if (reta_size > ETH_RSS_RETA_SIZE_128) {
1864                 DP_ERR(edev, "reta_size %d is not supported\n",
1865                        reta_size);
1866                 return -EINVAL;
1867         }
1868
1869         for (i = 0; i < reta_size; i++) {
1870                 idx = i / RTE_RETA_GROUP_SIZE;
1871                 shift = i % RTE_RETA_GROUP_SIZE;
1872                 if (reta_conf[idx].mask & (1ULL << shift)) {
1873                         entry = qdev->rss_ind_table[i];
1874                         reta_conf[idx].reta[shift] = entry;
1875                 }
1876         }
1877
1878         return 0;
1879 }
1880
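/* mtu_set() callback: validate the new MTU, restart the port with adjusted
 * Rx buffer sizes and update the maximum frame length.
 */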
1881 static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
1882 {
1883         struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
1884         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1885         struct rte_eth_dev_info dev_info = {0};
1886         struct qede_fastpath *fp;
1887         uint32_t frame_size;
1888         uint16_t rx_buf_size;
1889         uint16_t bufsz;
1890         int i;
1891
1892         PMD_INIT_FUNC_TRACE(edev);
1893         qede_dev_info_get(dev, &dev_info);
1894         frame_size = mtu + QEDE_ETH_OVERHEAD;
1895         if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
1896                 DP_ERR(edev, "MTU %u out of range\n", mtu);
1897                 return -EINVAL;
1898         }
1899         if (!dev->data->scattered_rx &&
1900             frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
1901                 DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
1902                         dev->data->min_rx_buf_size);
1903                 return -EINVAL;
1904         }
1905         /* Temporarily replace I/O functions with dummy ones. It cannot
1906          * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
1907          */
1908         dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
1909         dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
1910         qede_dev_stop(dev);
1911         rte_delay_ms(1000);
1912         qdev->mtu = mtu;
1913         /* Fix up RX buf size for all queues of the port */
1914         for_each_queue(i) {
1915                 fp = &qdev->fp_array[i];
1916                 if (fp->type & QEDE_FASTPATH_RX) {
1917                         bufsz = (uint16_t)rte_pktmbuf_data_room_size(
1918                                 fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
1919                         if (dev->data->scattered_rx)
1920                                 rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
1921                         else
1922                                 rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
1923                         rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
1924                         fp->rxq->rx_buf_size = rx_buf_size;
1925                         DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
1926                 }
1927         }
1928         qede_dev_start(dev);
1929         if (frame_size > ETHER_MAX_LEN)
1930                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
1931         else
1932                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
1933         /* update max frame size */
1934         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1935         /* Reassign back */
1936         dev->rx_pkt_burst = qede_recv_pkts;
1937         dev->tx_pkt_burst = qede_xmit_pkts;
1938
1939         return 0;
1940 }
1941
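/* Program the VXLAN UDP destination port on all hwfns, or restore the
 * default port when 'add' is false.
 */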
1942 static int
1943 qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
1944                        struct rte_eth_udp_tunnel *tunnel_udp,
1945                        bool add)
1946 {
1947         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1948         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1949         struct ecore_tunnel_info tunn; /* @DPDK */
1950         struct ecore_hwfn *p_hwfn;
1951         int rc, i;
1952
1953         PMD_INIT_FUNC_TRACE(edev);
1954
1955         memset(&tunn, 0, sizeof(tunn));
1956         if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
1957                 tunn.vxlan_port.b_update_port = true;
1958                 tunn.vxlan_port.port = (add) ? tunnel_udp->udp_port :
1959                                                   QEDE_VXLAN_DEF_PORT;
1960                 for_each_hwfn(edev, i) {
1961                         p_hwfn = &edev->hwfns[i];
1962                         rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
1963                                                 ECORE_SPQ_MODE_CB, NULL);
1964                         if (rc != ECORE_SUCCESS) {
1965                                 DP_ERR(edev, "Unable to config UDP port %u\n",
1966                                        tunn.vxlan_port.port);
1967                                 return rc;
1968                         }
1969                 }
1970         }
1971
1972         return 0;
1973 }
1974
1975 static int
1976 qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
1977                       struct rte_eth_udp_tunnel *tunnel_udp)
1978 {
1979         return qede_conf_udp_dst_port(eth_dev, tunnel_udp, false);
1980 }
1981
1982 static int
1983 qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
1984                       struct rte_eth_udp_tunnel *tunnel_udp)
1985 {
1986         return qede_conf_udp_dst_port(eth_dev, tunnel_udp, true);
1987 }
1988
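/* Map an rte tunnel filter type to the ecore ucast type and tunnel
 * classification; clss stays MAX_ECORE_TUNN_CLSS if the type is unsupported.
 */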
1989 static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
1990                                        uint32_t *clss, char *str)
1991 {
1992         uint16_t j;
1993         *clss = MAX_ECORE_TUNN_CLSS;
1994
1995         for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
1996                 if (filter == qede_tunn_types[j].rte_filter_type) {
1997                         *type = qede_tunn_types[j].qede_type;
1998                         *clss = qede_tunn_types[j].qede_tunn_clss;
1999                         strcpy(str, qede_tunn_types[j].string);
2000                         return;
2001                 }
2002         }
2003 }
2004
2005 static int
2006 qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
2007                               const struct rte_eth_tunnel_filter_conf *conf,
2008                               uint32_t type)
2009 {
2010         /* Init common ucast params first */
2011         qede_set_ucast_cmn_params(ucast);
2012
2013         /* Copy out the required fields based on classification type */
2014         ucast->type = type;
2015
2016         switch (type) {
2017         case ECORE_FILTER_VNI:
2018                 ucast->vni = conf->tenant_id;
2019         break;
2020         case ECORE_FILTER_INNER_VLAN:
2021                 ucast->vlan = conf->inner_vlan;
2022         break;
2023         case ECORE_FILTER_MAC:
2024                 memcpy(ucast->mac, conf->outer_mac.addr_bytes,
2025                        ETHER_ADDR_LEN);
2026         break;
2027         case ECORE_FILTER_INNER_MAC:
2028                 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
2029                        ETHER_ADDR_LEN);
2030         break;
2031         case ECORE_FILTER_MAC_VNI_PAIR:
2032                 memcpy(ucast->mac, conf->outer_mac.addr_bytes,
2033                         ETHER_ADDR_LEN);
2034                 ucast->vni = conf->tenant_id;
2035         break;
2036         case ECORE_FILTER_INNER_MAC_VNI_PAIR:
2037                 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
2038                         ETHER_ADDR_LEN);
2039                 ucast->vni = conf->tenant_id;
2040         break;
2041         case ECORE_FILTER_INNER_PAIR:
2042                 memcpy(ucast->mac, conf->inner_mac.addr_bytes,
2043                         ETHER_ADDR_LEN);
2044                 ucast->vlan = conf->inner_vlan;
2045         break;
2046         default:
2047                 return -EINVAL;
2048         }
2049
2050         return ECORE_SUCCESS;
2051 }
2052
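/* Add or delete a VXLAN tunnel filter and enable or disable the VXLAN tunnel
 * classification accordingly.
 */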
2053 static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
2054                                   enum rte_filter_op filter_op,
2055                                   const struct rte_eth_tunnel_filter_conf *conf)
2056 {
2057         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2058         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2059         struct ecore_tunnel_info tunn;
2060         struct ecore_hwfn *p_hwfn;
2061         enum ecore_filter_ucast_type type;
2062         enum ecore_tunn_clss clss;
2063         struct ecore_filter_ucast ucast;
2064         char str[80];
2065         uint16_t filter_type;
2066         int rc, i;
2067
2068         filter_type = conf->filter_type | qdev->vxlan_filter_type;
2069         /* First determine if the given filter classification is supported */
2070         qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
2071         if (clss == MAX_ECORE_TUNN_CLSS) {
2072                 DP_ERR(edev, "Wrong filter type\n");
2073                 return -EINVAL;
2074         }
2075         /* Init tunnel ucast params */
2076         rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
2077         if (rc != ECORE_SUCCESS) {
2078                 DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
2079                                 conf->filter_type);
2080                 return rc;
2081         }
2082         DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
2083                 str, filter_op, ucast.type);
2084         switch (filter_op) {
2085         case RTE_ETH_FILTER_ADD:
2086                 ucast.opcode = ECORE_FILTER_ADD;
2087
2088                 /* Skip MAC/VLAN if filter is based on VNI */
2089                 if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
2090                         rc = qede_mac_int_ops(eth_dev, &ucast, 1);
2091                         if (rc == 0) {
2092                                 /* Enable accept anyvlan */
2093                                 qede_config_accept_any_vlan(qdev, true);
2094                         }
2095                 } else {
2096                         rc = qede_ucast_filter(eth_dev, &ucast, 1);
2097                         if (rc == 0)
2098                                 rc = ecore_filter_ucast_cmd(edev, &ucast,
2099                                                     ECORE_SPQ_MODE_CB, NULL);
2100                 }
2101
2102                 if (rc != ECORE_SUCCESS)
2103                         return rc;
2104
2105                 qdev->vxlan_filter_type = filter_type;
2106
2107                 DP_INFO(edev, "Enabling VXLAN tunneling\n");
2108                 qede_set_cmn_tunn_param(&tunn, clss, true, true);
2109                 for_each_hwfn(edev, i) {
2110                         p_hwfn = &edev->hwfns[i];
2111                         rc = ecore_sp_pf_update_tunn_cfg(p_hwfn,
2112                                 &tunn, ECORE_SPQ_MODE_CB, NULL);
2113                         if (rc != ECORE_SUCCESS) {
2114                                 DP_ERR(edev, "Failed to update tunn_clss %u\n",
2115                                        tunn.vxlan.tun_cls);
2116                         }
2117                 }
2118                 qdev->num_tunn_filters++; /* Filter added successfully */
2119         break;
2120         case RTE_ETH_FILTER_DELETE:
2121                 ucast.opcode = ECORE_FILTER_REMOVE;
2122
2123                 if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
2124                         rc = qede_mac_int_ops(eth_dev, &ucast, 0);
2125                 } else {
2126                         rc = qede_ucast_filter(eth_dev, &ucast, 0);
2127                         if (rc == 0)
2128                                 rc = ecore_filter_ucast_cmd(edev, &ucast,
2129                                                     ECORE_SPQ_MODE_CB, NULL);
2130                 }
2131                 if (rc != ECORE_SUCCESS)
2132                         return rc;
2133
2134                 qdev->vxlan_filter_type = filter_type;
2135                 qdev->num_tunn_filters--;
2136
2137                 /* Disable VXLAN if VXLAN filters become 0 */
2138                 if (qdev->num_tunn_filters == 0) {
2139                         DP_INFO(edev, "Disabling VXLAN tunneling\n");
2140
2141                         /* Use 0 as tunnel mode */
2142                         qede_set_cmn_tunn_param(&tunn, clss, false, true);
2143                         for_each_hwfn(edev, i) {
2144                                 p_hwfn = &edev->hwfns[i];
2145                                 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
2146                                         ECORE_SPQ_MODE_CB, NULL);
2147                                 if (rc != ECORE_SUCCESS) {
2148                                         DP_ERR(edev,
2149                                                 "Failed to update tunn_clss %u\n",
2150                                                 tunn.vxlan.tun_cls);
2151                                         break;
2152                                 }
2153                         }
2154                 }
2155         break;
2156         default:
2157                 DP_ERR(edev, "Unsupported operation %d\n", filter_op);
2158                 return -EINVAL;
2159         }
2160         DP_INFO(edev, "Current VXLAN filters %d\n", qdev->num_tunn_filters);
2161
2162         return 0;
2163 }
2164
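/* filter_ctrl() callback: dispatch tunnel, flow director and ntuple filter
 * requests to their respective handlers.
 */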
2165 int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
2166                          enum rte_filter_type filter_type,
2167                          enum rte_filter_op filter_op,
2168                          void *arg)
2169 {
2170         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2171         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2172         struct rte_eth_tunnel_filter_conf *filter_conf =
2173                         (struct rte_eth_tunnel_filter_conf *)arg;
2174
2175         switch (filter_type) {
2176         case RTE_ETH_FILTER_TUNNEL:
2177                 switch (filter_conf->tunnel_type) {
2178                 case RTE_TUNNEL_TYPE_VXLAN:
2179                         DP_INFO(edev,
2180                                 "Packet steering to the specified Rx queue"
2181                                 " is not supported with VXLAN tunneling\n");
2182                         return qede_vxlan_tunn_config(eth_dev, filter_op,
2183                                                       filter_conf);
2184                 /* Placeholders for future tunneling support */
2185                 case RTE_TUNNEL_TYPE_GENEVE:
2186                 case RTE_TUNNEL_TYPE_TEREDO:
2187                 case RTE_TUNNEL_TYPE_NVGRE:
2188                 case RTE_TUNNEL_TYPE_IP_IN_GRE:
2189                 case RTE_L2_TUNNEL_TYPE_E_TAG:
2190                         DP_ERR(edev, "Unsupported tunnel type %d\n",
2191                                 filter_conf->tunnel_type);
2192                         return -EINVAL;
2193                 case RTE_TUNNEL_TYPE_NONE:
2194                 default:
2195                         return 0;
2196                 }
2197                 break;
2198         case RTE_ETH_FILTER_FDIR:
2199                 return qede_fdir_filter_conf(eth_dev, filter_op, arg);
2200         case RTE_ETH_FILTER_NTUPLE:
2201                 return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
2202         case RTE_ETH_FILTER_MACVLAN:
2203         case RTE_ETH_FILTER_ETHERTYPE:
2204         case RTE_ETH_FILTER_FLEXIBLE:
2205         case RTE_ETH_FILTER_SYN:
2206         case RTE_ETH_FILTER_HASH:
2207         case RTE_ETH_FILTER_L2_TUNNEL:
2208         case RTE_ETH_FILTER_MAX:
2209         default:
2210                 DP_ERR(edev, "Unsupported filter type %d\n",
2211                         filter_type);
2212                 return -EINVAL;
2213         }
2214
2215         return 0;
2216 }
2217
2218 static const struct eth_dev_ops qede_eth_dev_ops = {
2219         .dev_configure = qede_dev_configure,
2220         .dev_infos_get = qede_dev_info_get,
2221         .rx_queue_setup = qede_rx_queue_setup,
2222         .rx_queue_release = qede_rx_queue_release,
2223         .tx_queue_setup = qede_tx_queue_setup,
2224         .tx_queue_release = qede_tx_queue_release,
2225         .dev_start = qede_dev_start,
2226         .dev_set_link_up = qede_dev_set_link_up,
2227         .dev_set_link_down = qede_dev_set_link_down,
2228         .link_update = qede_link_update,
2229         .promiscuous_enable = qede_promiscuous_enable,
2230         .promiscuous_disable = qede_promiscuous_disable,
2231         .allmulticast_enable = qede_allmulticast_enable,
2232         .allmulticast_disable = qede_allmulticast_disable,
2233         .dev_stop = qede_dev_stop,
2234         .dev_close = qede_dev_close,
2235         .stats_get = qede_get_stats,
2236         .stats_reset = qede_reset_stats,
2237         .xstats_get = qede_get_xstats,
2238         .xstats_reset = qede_reset_xstats,
2239         .xstats_get_names = qede_get_xstats_names,
2240         .mac_addr_add = qede_mac_addr_add,
2241         .mac_addr_remove = qede_mac_addr_remove,
2242         .mac_addr_set = qede_mac_addr_set,
2243         .vlan_offload_set = qede_vlan_offload_set,
2244         .vlan_filter_set = qede_vlan_filter_set,
2245         .flow_ctrl_set = qede_flow_ctrl_set,
2246         .flow_ctrl_get = qede_flow_ctrl_get,
2247         .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
2248         .rss_hash_update = qede_rss_hash_update,
2249         .rss_hash_conf_get = qede_rss_hash_conf_get,
2250         .reta_update  = qede_rss_reta_update,
2251         .reta_query  = qede_rss_reta_query,
2252         .mtu_set = qede_set_mtu,
2253         .filter_ctrl = qede_dev_filter_ctrl,
2254         .udp_tunnel_port_add = qede_udp_dst_port_add,
2255         .udp_tunnel_port_del = qede_udp_dst_port_del,
2256 };
2257
2258 static const struct eth_dev_ops qede_eth_vf_dev_ops = {
2259         .dev_configure = qede_dev_configure,
2260         .dev_infos_get = qede_dev_info_get,
2261         .rx_queue_setup = qede_rx_queue_setup,
2262         .rx_queue_release = qede_rx_queue_release,
2263         .tx_queue_setup = qede_tx_queue_setup,
2264         .tx_queue_release = qede_tx_queue_release,
2265         .dev_start = qede_dev_start,
2266         .dev_set_link_up = qede_dev_set_link_up,
2267         .dev_set_link_down = qede_dev_set_link_down,
2268         .link_update = qede_link_update,
2269         .promiscuous_enable = qede_promiscuous_enable,
2270         .promiscuous_disable = qede_promiscuous_disable,
2271         .allmulticast_enable = qede_allmulticast_enable,
2272         .allmulticast_disable = qede_allmulticast_disable,
2273         .dev_stop = qede_dev_stop,
2274         .dev_close = qede_dev_close,
2275         .stats_get = qede_get_stats,
2276         .stats_reset = qede_reset_stats,
2277         .xstats_get = qede_get_xstats,
2278         .xstats_reset = qede_reset_xstats,
2279         .xstats_get_names = qede_get_xstats_names,
2280         .vlan_offload_set = qede_vlan_offload_set,
2281         .vlan_filter_set = qede_vlan_filter_set,
2282         .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
2283         .rss_hash_update = qede_rss_hash_update,
2284         .rss_hash_conf_get = qede_rss_hash_conf_get,
2285         .reta_update  = qede_rss_reta_update,
2286         .reta_query  = qede_rss_reta_query,
2287         .mtu_set = qede_set_mtu,
2288 };
2289
2290 static void qede_update_pf_params(struct ecore_dev *edev)
2291 {
2292         struct ecore_pf_params pf_params;
2293
2294         memset(&pf_params, 0, sizeof(struct ecore_pf_params));
2295         pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
2296         pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
2297         qed_ops->common->update_pf_params(edev, &pf_params);
2298 }
2299
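/* Common PF/VF init: probe the device, start the slowpath, allocate the MAC
 * address table, set up the primary MAC and register the dev_ops; for PFs
 * the vport is also started here.
 */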
2300 static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
2301 {
2302         struct rte_pci_device *pci_dev;
2303         struct rte_pci_addr pci_addr;
2304         struct qede_dev *adapter;
2305         struct ecore_dev *edev;
2306         struct qed_dev_eth_info dev_info;
2307         struct qed_slowpath_params params;
2308         static bool do_once = true;
2309         uint8_t bulletin_change;
2310         uint8_t vf_mac[ETHER_ADDR_LEN];
2311         uint8_t is_mac_forced;
2312         bool is_mac_exist;
2313         /* Fix up ecore debug level */
2314         uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
2315         uint8_t dp_level = ECORE_LEVEL_VERBOSE;
2316         int rc;
2317
2318         /* Extract key data structures */
2319         adapter = eth_dev->data->dev_private;
2320         edev = &adapter->edev;
2321         pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2322         pci_addr = pci_dev->addr;
2323
2324         PMD_INIT_FUNC_TRACE(edev);
2325
2326         snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
2327                  pci_addr.bus, pci_addr.devid, pci_addr.function,
2328                  eth_dev->data->port_id);
2329
2330         eth_dev->rx_pkt_burst = qede_recv_pkts;
2331         eth_dev->tx_pkt_burst = qede_xmit_pkts;
2332         eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
2333
2334         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2335                 DP_NOTICE(edev, false,
2336                           "Skipping device init from secondary process\n");
2337                 return 0;
2338         }
2339
2340         rte_eth_copy_pci_info(eth_dev, pci_dev);
2341
2342         /* @DPDK */
2343         edev->vendor_id = pci_dev->id.vendor_id;
2344         edev->device_id = pci_dev->id.device_id;
2345
2346         qed_ops = qed_get_eth_ops();
2347         if (!qed_ops) {
2348                 DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
2349                 return -EINVAL;
2350         }
2351
2352         DP_INFO(edev, "Starting qede probe\n");
2353
2354         rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
2355                                     dp_module, dp_level, is_vf);
2356
2357         if (rc != 0) {
2358                 DP_ERR(edev, "qede probe failed rc %d\n", rc);
2359                 return -ENODEV;
2360         }
2361
2362         qede_update_pf_params(edev);
2363
2364         rte_intr_callback_register(&pci_dev->intr_handle,
2365                                    qede_interrupt_handler, (void *)eth_dev);
2366
2367         if (rte_intr_enable(&pci_dev->intr_handle)) {
2368                 DP_ERR(edev, "rte_intr_enable() failed\n");
2369                 return -ENODEV;
2370         }
2371
2372         /* Start the Slowpath-process */
2373         memset(&params, 0, sizeof(struct qed_slowpath_params));
2374         params.int_mode = ECORE_INT_MODE_MSIX;
2375         params.drv_major = QEDE_PMD_VERSION_MAJOR;
2376         params.drv_minor = QEDE_PMD_VERSION_MINOR;
2377         params.drv_rev = QEDE_PMD_VERSION_REVISION;
2378         params.drv_eng = QEDE_PMD_VERSION_PATCH;
2379         strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
2380                 QEDE_PMD_DRV_VER_STR_SIZE);
2381
2382         /* For CMT-mode devices, do periodic polling for slowpath events.
2383          * This is required since the uio device uses only one MSI-X
2384          * interrupt vector, but we need one for each engine.
2385          */
2386         if (edev->num_hwfns > 1 && IS_PF(edev)) {
2387                 rc = rte_eal_alarm_set(timer_period * US_PER_S,
2388                                        qede_poll_sp_sb_cb,
2389                                        (void *)eth_dev);
2390                 if (rc != 0) {
2391                         DP_ERR(edev, "Unable to start periodic"
2392                                      " timer rc %d\n", rc);
2393                         return -EINVAL;
2394                 }
2395         }
2396
2397         rc = qed_ops->common->slowpath_start(edev, &params);
2398         if (rc) {
2399                 DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
2400                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
2401                                      (void *)eth_dev);
2402                 return -ENODEV;
2403         }
2404
2405         rc = qed_ops->fill_dev_info(edev, &dev_info);
2406         if (rc) {
2407                 DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
2408                 qed_ops->common->slowpath_stop(edev);
2409                 qed_ops->common->remove(edev);
2410                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
2411                                      (void *)eth_dev);
2412                 return -ENODEV;
2413         }
2414
2415         qede_alloc_etherdev(adapter, &dev_info);
2416
2417         adapter->ops->common->set_name(edev, edev->name);
2418
2419         if (!is_vf)
2420                 adapter->dev_info.num_mac_filters =
2421                         (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
2422                                             ECORE_MAC);
2423         else
2424                 ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
2425                                 (uint32_t *)&adapter->dev_info.num_mac_filters);
2426
2427         /* Allocate memory for storing MAC addr */
2428         eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
2429                                         (ETHER_ADDR_LEN *
2430                                         adapter->dev_info.num_mac_filters),
2431                                         RTE_CACHE_LINE_SIZE);
2432
2433         if (eth_dev->data->mac_addrs == NULL) {
2434                 DP_ERR(edev, "Failed to allocate MAC address\n");
2435                 qed_ops->common->slowpath_stop(edev);
2436                 qed_ops->common->remove(edev);
2437                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
2438                                      (void *)eth_dev);
2439                 return -ENOMEM;
2440         }
2441
2442         if (!is_vf) {
2443                 ether_addr_copy((struct ether_addr *)edev->hwfns[0].
2444                                 hw_info.hw_mac_addr,
2445                                 &eth_dev->data->mac_addrs[0]);
2446                 ether_addr_copy(&eth_dev->data->mac_addrs[0],
2447                                 &adapter->primary_mac);
2448         } else {
2449                 ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
2450                                        &bulletin_change);
2451                 if (bulletin_change) {
2452                         is_mac_exist =
2453                             ecore_vf_bulletin_get_forced_mac(
2454                                                 ECORE_LEADING_HWFN(edev),
2455                                                 vf_mac,
2456                                                 &is_mac_forced);
2457                         if (is_mac_exist && is_mac_forced) {
2458                                 DP_INFO(edev, "VF macaddr received from PF\n");
2459                                 ether_addr_copy((struct ether_addr *)&vf_mac,
2460                                                 &eth_dev->data->mac_addrs[0]);
2461                                 ether_addr_copy(&eth_dev->data->mac_addrs[0],
2462                                                 &adapter->primary_mac);
2463                         } else {
2464                                 DP_NOTICE(edev, false,
2465                                           "No VF macaddr assigned\n");
2466                         }
2467                 }
2468         }
2469
2470         eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
2471
2472         if (do_once) {
2473 #ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
2474                 qede_print_adapter_info(adapter);
2475 #endif
2476                 do_once = false;
2477         }
2478
2479         adapter->state = QEDE_DEV_INIT;
2480         adapter->mtu = ETHER_MTU;
2481         adapter->new_mtu = ETHER_MTU;
2482         if (!is_vf)
2483                 if (qede_start_vport(adapter, adapter->mtu))
2484                         return -1;
2485
2486         DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
2487                   adapter->primary_mac.addr_bytes[0],
2488                   adapter->primary_mac.addr_bytes[1],
2489                   adapter->primary_mac.addr_bytes[2],
2490                   adapter->primary_mac.addr_bytes[3],
2491                   adapter->primary_mac.addr_bytes[4],
2492                   adapter->primary_mac.addr_bytes[5]);
2493
2494         return rc;
2495 }
2496
2497 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
2498 {
2499         return qede_common_dev_init(eth_dev, 1);
2500 }
2501
2502 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
2503 {
2504         return qede_common_dev_init(eth_dev, 0);
2505 }
2506
2507 static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
2508 {
2509         /* only uninitialize in the primary process */
2510         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2511                 return 0;
2512
2513         /* safe to close dev here */
2514         qede_dev_close(eth_dev);
2515
2516         eth_dev->dev_ops = NULL;
2517         eth_dev->rx_pkt_burst = NULL;
2518         eth_dev->tx_pkt_burst = NULL;
2519
2520         if (eth_dev->data->mac_addrs)
2521                 rte_free(eth_dev->data->mac_addrs);
2522
2523         eth_dev->data->mac_addrs = NULL;
2524
2525         return 0;
2526 }
2527
2528 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
2529 {
2530         return qede_dev_common_uninit(eth_dev);
2531 }
2532
2533 static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
2534 {
2535         return qede_dev_common_uninit(eth_dev);
2536 }
2537
2538 static const struct rte_pci_id pci_id_qedevf_map[] = {
2539 #define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
2540         {
2541                 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
2542         },
2543         {
2544                 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
2545         },
2546         {
2547                 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
2548         },
2549         {.vendor_id = 0,}
2550 };
2551
2552 static const struct rte_pci_id pci_id_qede_map[] = {
2553 #define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
2554         {
2555                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
2556         },
2557         {
2558                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
2559         },
2560         {
2561                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
2562         },
2563         {
2564                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
2565         },
2566         {
2567                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
2568         },
2569         {
2570                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
2571         },
2572         {
2573                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
2574         },
2575         {
2576                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
2577         },
2578         {
2579                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
2580         },
2581         {
2582                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
2583         },
2584         {.vendor_id = 0,}
2585 };
2586
2587 static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2588         struct rte_pci_device *pci_dev)
2589 {
2590         return rte_eth_dev_pci_generic_probe(pci_dev,
2591                 sizeof(struct qede_dev), qedevf_eth_dev_init);
2592 }
2593
2594 static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
2595 {
2596         return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
2597 }
2598
2599 static struct rte_pci_driver rte_qedevf_pmd = {
2600         .id_table = pci_id_qedevf_map,
2601         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2602         .probe = qedevf_eth_dev_pci_probe,
2603         .remove = qedevf_eth_dev_pci_remove,
2604 };
2605
2606 static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2607         struct rte_pci_device *pci_dev)
2608 {
2609         return rte_eth_dev_pci_generic_probe(pci_dev,
2610                 sizeof(struct qede_dev), qede_eth_dev_init);
2611 }
2612
2613 static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
2614 {
2615         return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
2616 }
2617
2618 static struct rte_pci_driver rte_qede_pmd = {
2619         .id_table = pci_id_qede_map,
2620         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2621         .probe = qede_eth_dev_pci_probe,
2622         .remove = qede_eth_dev_pci_remove,
2623 };
2624
2625 RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
2626 RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
2627 RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
2628 RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
2629 RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
2630 RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");