net/qede: support device reset
drivers/net/qede/qede_ethdev.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2016 - 2018 Cavium Inc.
3  * All rights reserved.
4  * www.cavium.com
5  */
6
7 #include "qede_ethdev.h"
8 #include <rte_alarm.h>
9 #include <rte_version.h>
10 #include <rte_kvargs.h>
11
12 /* Globals */
13 int qede_logtype_init;
14 int qede_logtype_driver;
15
16 static const struct qed_eth_ops *qed_ops;
17 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev);
18 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev);
19
20 #define QEDE_SP_TIMER_PERIOD    10000 /* 10ms, value is in microseconds */
21
22 struct rte_qede_xstats_name_off {
23         char name[RTE_ETH_XSTATS_NAME_SIZE];
24         uint64_t offset;
25 };
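/*
 * Each {name, offset} pair maps an xstat display name to a byte offset
 * inside the ecore statistics block, so a counter can be fetched
 * generically.  A minimal sketch (assuming "stats" is an ecore_eth_stats
 * filled in by the ecore layer):
 *
 *     uint64_t val = *(uint64_t *)((char *)&stats +
 *                                  qede_xstats_strings[i].offset);
 */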
26
27 static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
28         {"rx_unicast_bytes",
29                 offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
30         {"rx_multicast_bytes",
31                 offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
32         {"rx_broadcast_bytes",
33                 offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
34         {"rx_unicast_packets",
35                 offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
36         {"rx_multicast_packets",
37                 offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
38         {"rx_broadcast_packets",
39                 offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},
40
41         {"tx_unicast_bytes",
42                 offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
43         {"tx_multicast_bytes",
44                 offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
45         {"tx_broadcast_bytes",
46                 offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
47         {"tx_unicast_packets",
48                 offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
49         {"tx_multicast_packets",
50                 offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
51         {"tx_broadcast_packets",
52                 offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},
53
54         {"rx_64_byte_packets",
55                 offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
56         {"rx_65_to_127_byte_packets",
57                 offsetof(struct ecore_eth_stats_common,
58                          rx_65_to_127_byte_packets)},
59         {"rx_128_to_255_byte_packets",
60                 offsetof(struct ecore_eth_stats_common,
61                          rx_128_to_255_byte_packets)},
62         {"rx_256_to_511_byte_packets",
63                 offsetof(struct ecore_eth_stats_common,
64                          rx_256_to_511_byte_packets)},
65         {"rx_512_to_1023_byte_packets",
66                 offsetof(struct ecore_eth_stats_common,
67                          rx_512_to_1023_byte_packets)},
68         {"rx_1024_to_1518_byte_packets",
69                 offsetof(struct ecore_eth_stats_common,
70                          rx_1024_to_1518_byte_packets)},
71         {"tx_64_byte_packets",
72                 offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
73         {"tx_65_to_127_byte_packets",
74                 offsetof(struct ecore_eth_stats_common,
75                          tx_65_to_127_byte_packets)},
76         {"tx_128_to_255_byte_packets",
77                 offsetof(struct ecore_eth_stats_common,
78                          tx_128_to_255_byte_packets)},
79         {"tx_256_to_511_byte_packets",
80                 offsetof(struct ecore_eth_stats_common,
81                          tx_256_to_511_byte_packets)},
82         {"tx_512_to_1023_byte_packets",
83                 offsetof(struct ecore_eth_stats_common,
84                          tx_512_to_1023_byte_packets)},
85         {"tx_1024_to_1518_byte_packets",
86                 offsetof(struct ecore_eth_stats_common,
87                          tx_1024_to_1518_byte_packets)},
88
89         {"rx_mac_crtl_frames",
90                 offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
91         {"tx_mac_control_frames",
92                 offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
93         {"rx_pause_frames",
94                 offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
95         {"tx_pause_frames",
96                 offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
97         {"rx_priority_flow_control_frames",
98                 offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
99         {"tx_priority_flow_control_frames",
100                 offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},
101
102         {"rx_crc_errors",
103                 offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
104         {"rx_align_errors",
105                 offsetof(struct ecore_eth_stats_common, rx_align_errors)},
106         {"rx_carrier_errors",
107                 offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
108         {"rx_oversize_packet_errors",
109                 offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
110         {"rx_jabber_errors",
111                 offsetof(struct ecore_eth_stats_common, rx_jabbers)},
112         {"rx_undersize_packet_errors",
113                 offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
114         {"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
115         {"rx_host_buffer_not_available",
116                 offsetof(struct ecore_eth_stats_common, no_buff_discards)},
117         /* Number of packets discarded because they are bigger than MTU */
118         {"rx_packet_too_big_discards",
119                 offsetof(struct ecore_eth_stats_common,
120                          packet_too_big_discard)},
121         {"rx_ttl_zero_discards",
122                 offsetof(struct ecore_eth_stats_common, ttl0_discard)},
123         {"rx_multi_function_tag_filter_discards",
124                 offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
125         {"rx_mac_filter_discards",
126                 offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
127         {"rx_hw_buffer_truncates",
128                 offsetof(struct ecore_eth_stats_common, brb_truncates)},
129         {"rx_hw_buffer_discards",
130                 offsetof(struct ecore_eth_stats_common, brb_discards)},
131         {"tx_error_drop_packets",
132                 offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},
133
134         {"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
135         {"rx_mac_unicast_packets",
136                 offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
137         {"rx_mac_multicast_packets",
138                 offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
139         {"rx_mac_broadcast_packets",
140                 offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
141         {"rx_mac_frames_ok",
142                 offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
143         {"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
144         {"tx_mac_unicast_packets",
145                 offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
146         {"tx_mac_multicast_packets",
147                 offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
148         {"tx_mac_broadcast_packets",
149                 offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},
150
151         {"lro_coalesced_packets",
152                 offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
153         {"lro_coalesced_events",
154                 offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
155         {"lro_aborts_num",
156                 offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
157         {"lro_not_coalesced_packets",
158                 offsetof(struct ecore_eth_stats_common,
159                          tpa_not_coalesced_pkts)},
160         {"lro_coalesced_bytes",
161                 offsetof(struct ecore_eth_stats_common,
162                          tpa_coalesced_bytes)},
163 };
164
165 static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
166         {"rx_1519_to_1522_byte_packets",
167                 offsetof(struct ecore_eth_stats, bb) +
168                 offsetof(struct ecore_eth_stats_bb,
169                          rx_1519_to_1522_byte_packets)},
170         {"rx_1519_to_2047_byte_packets",
171                 offsetof(struct ecore_eth_stats, bb) +
172                 offsetof(struct ecore_eth_stats_bb,
173                          rx_1519_to_2047_byte_packets)},
174         {"rx_2048_to_4095_byte_packets",
175                 offsetof(struct ecore_eth_stats, bb) +
176                 offsetof(struct ecore_eth_stats_bb,
177                          rx_2048_to_4095_byte_packets)},
178         {"rx_4096_to_9216_byte_packets",
179                 offsetof(struct ecore_eth_stats, bb) +
180                 offsetof(struct ecore_eth_stats_bb,
181                          rx_4096_to_9216_byte_packets)},
182         {"rx_9217_to_16383_byte_packets",
183                 offsetof(struct ecore_eth_stats, bb) +
184                 offsetof(struct ecore_eth_stats_bb,
185                          rx_9217_to_16383_byte_packets)},
186
187         {"tx_1519_to_2047_byte_packets",
188                 offsetof(struct ecore_eth_stats, bb) +
189                 offsetof(struct ecore_eth_stats_bb,
190                          tx_1519_to_2047_byte_packets)},
191         {"tx_2048_to_4095_byte_packets",
192                 offsetof(struct ecore_eth_stats, bb) +
193                 offsetof(struct ecore_eth_stats_bb,
194                          tx_2048_to_4095_byte_packets)},
195         {"tx_4096_to_9216_byte_packets",
196                 offsetof(struct ecore_eth_stats, bb) +
197                 offsetof(struct ecore_eth_stats_bb,
198                          tx_4096_to_9216_byte_packets)},
199         {"tx_9217_to_16383_byte_packets",
200                 offsetof(struct ecore_eth_stats, bb) +
201                 offsetof(struct ecore_eth_stats_bb,
202                          tx_9217_to_16383_byte_packets)},
203
204         {"tx_lpi_entry_count",
205                 offsetof(struct ecore_eth_stats, bb) +
206                 offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
207         {"tx_total_collisions",
208                 offsetof(struct ecore_eth_stats, bb) +
209                 offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
210 };
211
212 static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
213         {"rx_1519_to_max_byte_packets",
214                 offsetof(struct ecore_eth_stats, ah) +
215                 offsetof(struct ecore_eth_stats_ah,
216                          rx_1519_to_max_byte_packets)},
217         {"tx_1519_to_max_byte_packets",
218                 offsetof(struct ecore_eth_stats, ah) +
219                 offsetof(struct ecore_eth_stats_ah,
220                          tx_1519_to_max_byte_packets)},
221 };
222
223 static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
224         {"rx_q_segments",
225                 offsetof(struct qede_rx_queue, rx_segs)},
226         {"rx_q_hw_errors",
227                 offsetof(struct qede_rx_queue, rx_hw_errors)},
228         {"rx_q_allocation_errors",
229                 offsetof(struct qede_rx_queue, rx_alloc_errors)}
230 };
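/*
 * The offsets in qede_rxq_xstats_strings are relative to struct
 * qede_rx_queue and are read once per reported Rx queue, unlike the
 * device-wide tables above, which index into the ecore stats block.
 */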
231
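/* Run the slow-path DPC for the given HW function; this processes
 * slow-path status-block events (e.g. SPQ completions and attentions).
 */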
232 static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
233 {
234         ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
235 }
236
237 static void
238 qede_interrupt_handler_intx(void *param)
239 {
240         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
241         struct qede_dev *qdev = eth_dev->data->dev_private;
242         struct ecore_dev *edev = &qdev->edev;
243         u64 status;
244
245         /* Check if our device actually raised an interrupt */
246         status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev));
247         if (status & 0x1) {
248                 qede_interrupt_action(ECORE_LEADING_HWFN(edev));
249
250                 if (rte_intr_enable(eth_dev->intr_handle))
251                         DP_ERR(edev, "rte_intr_enable failed\n");
252         }
253 }
254
255 static void
256 qede_interrupt_handler(void *param)
257 {
258         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
259         struct qede_dev *qdev = eth_dev->data->dev_private;
260         struct ecore_dev *edev = &qdev->edev;
261
262         qede_interrupt_action(ECORE_LEADING_HWFN(edev));
263         if (rte_intr_enable(eth_dev->intr_handle))
264                 DP_ERR(edev, "rte_intr_enable failed\n");
265 }
266
267 static void
268 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
269 {
270         rte_memcpy(&qdev->dev_info, info, sizeof(*info));
271         qdev->ops = qed_ops;
272 }
273
274 static void qede_print_adapter_info(struct qede_dev *qdev)
275 {
276         struct ecore_dev *edev = &qdev->edev;
277         struct qed_dev_info *info = &qdev->dev_info.common;
278         static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
279         static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
280
281         DP_INFO(edev, "*********************************\n");
282         DP_INFO(edev, " DPDK version:%s\n", rte_version());
283         DP_INFO(edev, " Chip details : %s %c%d\n",
284                   ECORE_IS_BB(edev) ? "BB" : "AH",
285                   'A' + edev->chip_rev,
286                   (int)edev->chip_metal);
287         snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
288                  info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
289         snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
290                  ver_str, QEDE_PMD_VERSION);
291         DP_INFO(edev, " Driver version : %s\n", drv_ver);
292         DP_INFO(edev, " Firmware version : %s\n", ver_str);
293
294         snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
295                  "%d.%d.%d.%d",
296                 (info->mfw_rev >> 24) & 0xff,
297                 (info->mfw_rev >> 16) & 0xff,
298                 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
299         DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
300         DP_INFO(edev, " Firmware file : %s\n", fw_file);
301         DP_INFO(edev, "*********************************\n");
302 }
303
304 static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
305 {
306         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
307         unsigned int i = 0, j = 0, qid;
308         unsigned int rxq_stat_cntrs, txq_stat_cntrs;
309         struct qede_tx_queue *txq;
310
311         DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");
312
313         rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
314                                RTE_ETHDEV_QUEUE_STAT_CNTRS);
315         txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
316                                RTE_ETHDEV_QUEUE_STAT_CNTRS);
317
318         for_each_rss(qid) {
319                 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
320                              offsetof(struct qede_rx_queue, rcv_pkts), 0,
321                             sizeof(uint64_t));
322                 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
323                              offsetof(struct qede_rx_queue, rx_hw_errors), 0,
324                             sizeof(uint64_t));
325                 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
326                              offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
327                             sizeof(uint64_t));
328
329                 if (xstats)
330                         for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++)
331                                 OSAL_MEMSET((((char *)
332                                               (qdev->fp_array[qid].rxq)) +
333                                              qede_rxq_xstats_strings[j].offset),
334                                             0,
335                                             sizeof(uint64_t));
336
337                 i++;
338                 if (i == rxq_stat_cntrs)
339                         break;
340         }
341
342         i = 0;
343
344         for_each_tss(qid) {
345                 txq = qdev->fp_array[qid].txq;
346
347                 OSAL_MEMSET((uint64_t *)(uintptr_t)
348                                 (((uint64_t)(uintptr_t)(txq)) +
349                                  offsetof(struct qede_tx_queue, xmit_pkts)), 0,
350                             sizeof(uint64_t));
351
352                 i++;
353                 if (i == txq_stat_cntrs)
354                         break;
355         }
356 }
357
358 static int
359 qede_stop_vport(struct ecore_dev *edev)
360 {
361         struct ecore_hwfn *p_hwfn;
362         uint8_t vport_id;
363         int rc;
364         int i;
365
366         vport_id = 0;
367         for_each_hwfn(edev, i) {
368                 p_hwfn = &edev->hwfns[i];
369                 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
370                                          vport_id);
371                 if (rc != ECORE_SUCCESS) {
372                         DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
373                         return rc;
374                 }
375         }
376
377         DP_INFO(edev, "vport stopped\n");
378
379         return 0;
380 }
381
382 static int
383 qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
384 {
385         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
386         struct ecore_sp_vport_start_params params;
387         struct ecore_hwfn *p_hwfn;
388         int rc;
389         int i;
390
391         if (qdev->vport_started)
392                 qede_stop_vport(edev);
393
394         memset(&params, 0, sizeof(params));
395         params.vport_id = 0;
396         params.mtu = mtu;
397         /* @DPDK - Disable FW placement */
398         params.zero_placement_offset = 1;
399         for_each_hwfn(edev, i) {
400                 p_hwfn = &edev->hwfns[i];
401                 params.concrete_fid = p_hwfn->hw_info.concrete_fid;
402                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
403                 rc = ecore_sp_vport_start(p_hwfn, &params);
404                 if (rc != ECORE_SUCCESS) {
405                         DP_ERR(edev, "Start V-PORT failed %d\n", rc);
406                         return rc;
407                 }
408         }
409         ecore_reset_vport_stats(edev);
410         qdev->vport_started = true;
411         DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
412
413         return 0;
414 }
415
416 #define QEDE_NPAR_TX_SWITCHING          "npar_tx_switching"
417 #define QEDE_VF_TX_SWITCHING            "vf_tx_switching"
418
419 /* Activate or deactivate vport via vport-update */
420 int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
421 {
422         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
423         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
424         struct ecore_sp_vport_update_params params;
425         struct ecore_hwfn *p_hwfn;
426         uint8_t i;
427         int rc = -1;
428
429         memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
430         params.vport_id = 0;
431         params.update_vport_active_rx_flg = 1;
432         params.update_vport_active_tx_flg = 1;
433         params.vport_active_rx_flg = flg;
434         params.vport_active_tx_flg = flg;
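        /* Only when activating the vport with Tx switching disabled do we
         * ask the FW to update (i.e. clear) the Tx-switching flag.
         */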
435         if (~qdev->enable_tx_switching & flg) {
436                 params.update_tx_switching_flg = 1;
437                 params.tx_switching_flg = !flg;
438         }
439         for_each_hwfn(edev, i) {
440                 p_hwfn = &edev->hwfns[i];
441                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
442                 rc = ecore_sp_vport_update(p_hwfn, &params,
443                                 ECORE_SPQ_MODE_EBLOCK, NULL);
444                 if (rc != ECORE_SUCCESS) {
445                         DP_ERR(edev, "Failed to update vport\n");
446                         break;
447                 }
448         }
449         DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");
450
451         return rc;
452 }
453
454 static void
455 qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
456                            uint16_t mtu, bool enable)
457 {
458         /* Enable LRO in split mode */
459         sge_tpa_params->tpa_ipv4_en_flg = enable;
460         sge_tpa_params->tpa_ipv6_en_flg = enable;
461         sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
462         sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
463         /* set if tpa enable changes */
464         sge_tpa_params->update_tpa_en_flg = 1;
465         /* set if tpa parameters should be handled */
466         sge_tpa_params->update_tpa_param_flg = enable;
467
468         sge_tpa_params->max_buffers_per_cqe = 20;
469         /* Enable TPA in split mode. In this mode each TPA segment
470          * starts on the new BD, so there is one BD per segment.
471          */
472         sge_tpa_params->tpa_pkt_split_flg = 1;
473         sge_tpa_params->tpa_hdr_data_split_flg = 0;
474         sge_tpa_params->tpa_gro_consistent_flg = 0;
475         sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
476         sge_tpa_params->tpa_max_size = 0x7FFF;
477         sge_tpa_params->tpa_min_size_to_start = mtu / 2;
478         sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
479 }
480
481 /* Enable/disable LRO via vport-update */
482 int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
483 {
484         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
485         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
486         struct ecore_sp_vport_update_params params;
487         struct ecore_sge_tpa_params tpa_params;
488         struct ecore_hwfn *p_hwfn;
489         int rc;
490         int i;
491
492         memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
493         memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
494         qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
495         params.vport_id = 0;
496         params.sge_tpa_params = &tpa_params;
497         for_each_hwfn(edev, i) {
498                 p_hwfn = &edev->hwfns[i];
499                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
500                 rc = ecore_sp_vport_update(p_hwfn, &params,
501                                 ECORE_SPQ_MODE_EBLOCK, NULL);
502                 if (rc != ECORE_SUCCESS) {
503                         DP_ERR(edev, "Failed to update LRO\n");
504                         return -1;
505                 }
506         }
507         qdev->enable_lro = flg;
508         eth_dev->data->lro = flg;
509
510         DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
511
512         return 0;
513 }
514
515 static int
516 qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
517                              enum qed_filter_rx_mode_type type)
518 {
519         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
520         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
521         struct ecore_filter_accept_flags flags;
522
523         memset(&flags, 0, sizeof(flags));
524
525         flags.update_rx_mode_config = 1;
526         flags.update_tx_mode_config = 1;
527         flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
528                 ECORE_ACCEPT_MCAST_MATCHED |
529                 ECORE_ACCEPT_BCAST;
530
531         flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
532                 ECORE_ACCEPT_MCAST_MATCHED |
533                 ECORE_ACCEPT_BCAST;
534
535         if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
536                 flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
537                 if (IS_VF(edev)) {
538                         flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
539                         DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
540                 }
541         } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
542                 flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
543         } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
544                                 QED_FILTER_RX_MODE_TYPE_PROMISC)) {
545                 flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
546                         ECORE_ACCEPT_MCAST_UNMATCHED;
547         }
548
549         return ecore_filter_accept_cmd(edev, 0, flags, false, false,
550                         ECORE_SPQ_MODE_CB, NULL);
551 }
552
553 int
554 qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
555                   bool add)
556 {
557         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
558         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
559         struct qede_ucast_entry *tmp = NULL;
560         struct qede_ucast_entry *u;
561         struct ether_addr *mac_addr;
562
563         mac_addr  = (struct ether_addr *)ucast->mac;
564         if (add) {
565                 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
566                         if ((memcmp(mac_addr, &tmp->mac,
567                                     ETHER_ADDR_LEN) == 0) &&
568                              ucast->vni == tmp->vni &&
569                              ucast->vlan == tmp->vlan) {
570                                 DP_INFO(edev, "Unicast MAC is already added"
571                                         " with vlan = %u, vni = %u\n",
572                                         ucast->vlan,  ucast->vni);
573                                 return 0;
574                         }
575                 }
576                 u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
577                                RTE_CACHE_LINE_SIZE);
578                 if (!u) {
579                         DP_ERR(edev, "Did not allocate memory for ucast\n");
580                         return -ENOMEM;
581                 }
582                 ether_addr_copy(mac_addr, &u->mac);
583                 u->vlan = ucast->vlan;
584                 u->vni = ucast->vni;
585                 SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
586                 qdev->num_uc_addr++;
587         } else {
588                 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
589                         if ((memcmp(mac_addr, &tmp->mac,
590                                     ETHER_ADDR_LEN) == 0) &&
591                             ucast->vlan == tmp->vlan      &&
592                             ucast->vni == tmp->vni)
593                         break;
594                 }
595                 if (tmp == NULL) {
596                         DP_INFO(edev, "Unicast MAC is not found\n");
597                         return -EINVAL;
598                 }
599                 SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
600                 qdev->num_uc_addr--;
601         }
602
603         return 0;
604 }
605
606 static int
607 qede_add_mcast_filters(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
608                        uint32_t mc_addrs_num)
609 {
610         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
611         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
612         struct ecore_filter_mcast mcast;
613         struct qede_mcast_entry *m = NULL;
614         uint8_t i;
615         int rc;
616
617         for (i = 0; i < mc_addrs_num; i++) {
618                 m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
619                                RTE_CACHE_LINE_SIZE);
620                 if (!m) {
621                         DP_ERR(edev, "Did not allocate memory for mcast\n");
622                         return -ENOMEM;
623                 }
624                 ether_addr_copy(&mc_addrs[i], &m->mac);
625                 SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
626         }
627         memset(&mcast, 0, sizeof(mcast));
628         mcast.num_mc_addrs = mc_addrs_num;
629         mcast.opcode = ECORE_FILTER_ADD;
630         for (i = 0; i < mc_addrs_num; i++)
631                 ether_addr_copy(&mc_addrs[i], (struct ether_addr *)
632                                                         &mcast.mac[i]);
633         rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
634         if (rc != ECORE_SUCCESS) {
635                 DP_ERR(edev, "Failed to add multicast filter (rc = %d)\n", rc);
636                 return -1;
637         }
638
639         return 0;
640 }
641
642 static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev)
643 {
644         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
645         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
646         struct qede_mcast_entry *tmp = NULL;
647         struct ecore_filter_mcast mcast;
648         int j;
649         int rc;
650
651         memset(&mcast, 0, sizeof(mcast));
652         mcast.num_mc_addrs = qdev->num_mc_addr;
653         mcast.opcode = ECORE_FILTER_REMOVE;
654         j = 0;
655         SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
656                 ether_addr_copy(&tmp->mac, (struct ether_addr *)&mcast.mac[j]);
657                 j++;
658         }
659         rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
660         if (rc != ECORE_SUCCESS) {
661                 DP_ERR(edev, "Failed to delete multicast filter\n");
662                 return -1;
663         }
664         /* Init the list */
665         while (!SLIST_EMPTY(&qdev->mc_list_head)) {
666                 tmp = SLIST_FIRST(&qdev->mc_list_head);
667                 SLIST_REMOVE_HEAD(&qdev->mc_list_head, list);
668         }
669         SLIST_INIT(&qdev->mc_list_head);
670
671         return 0;
672 }
673
674 enum _ecore_status_t
675 qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
676                  bool add)
677 {
678         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
679         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
680         enum _ecore_status_t rc = ECORE_INVAL;
681
682         if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) {
683                 DP_ERR(edev, "Ucast filter table limit exceeded,"
684                               " Please enable promisc mode\n");
685                 return ECORE_INVAL;
686         }
687
688         rc = qede_ucast_filter(eth_dev, ucast, add);
689         if (rc == 0)
690                 rc = ecore_filter_ucast_cmd(edev, ucast,
691                                             ECORE_SPQ_MODE_CB, NULL);
692         /* Indicate error only for add filter operation.
693          * Delete filter operations are not severe.
694          */
695         if ((rc != ECORE_SUCCESS) && add)
696                 DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
697                        rc, add);
698
699         return rc;
700 }
701
702 static int
703 qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
704                   __rte_unused uint32_t index, __rte_unused uint32_t pool)
705 {
706         struct ecore_filter_ucast ucast;
707         int re;
708
709         if (!is_valid_assigned_ether_addr(mac_addr))
710                 return -EINVAL;
711
712         qede_set_ucast_cmn_params(&ucast);
713         ucast.opcode = ECORE_FILTER_ADD;
714         ucast.type = ECORE_FILTER_MAC;
715         ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
716         re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
717         return re;
718 }
719
720 static void
721 qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
722 {
723         struct qede_dev *qdev = eth_dev->data->dev_private;
724         struct ecore_dev *edev = &qdev->edev;
725         struct ecore_filter_ucast ucast;
726
727         PMD_INIT_FUNC_TRACE(edev);
728
729         if (index >= qdev->dev_info.num_mac_filters) {
730                 DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
731                        index, qdev->dev_info.num_mac_filters);
732                 return;
733         }
734
735         if (!is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))
736                 return;
737
738         qede_set_ucast_cmn_params(&ucast);
739         ucast.opcode = ECORE_FILTER_REMOVE;
740         ucast.type = ECORE_FILTER_MAC;
741
742         /* Use the index maintained by rte */
743         ether_addr_copy(&eth_dev->data->mac_addrs[index],
744                         (struct ether_addr *)&ucast.mac);
745
746         qede_mac_int_ops(eth_dev, &ucast, false);
747 }
748
749 static int
750 qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
751 {
752         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
753         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
754
755         if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
756                                                mac_addr->addr_bytes)) {
757                 DP_ERR(edev, "Setting MAC address is not allowed\n");
758                 return -EPERM;
759         }
760
761         qede_mac_addr_remove(eth_dev, 0);
762
763         return qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
764 }
765
766 void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
767 {
768         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
769         struct ecore_sp_vport_update_params params;
770         struct ecore_hwfn *p_hwfn;
771         uint8_t i;
772         int rc;
773
774         memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
775         params.vport_id = 0;
776         params.update_accept_any_vlan_flg = 1;
777         params.accept_any_vlan = flg;
778         for_each_hwfn(edev, i) {
779                 p_hwfn = &edev->hwfns[i];
780                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
781                 rc = ecore_sp_vport_update(p_hwfn, &params,
782                                 ECORE_SPQ_MODE_EBLOCK, NULL);
783                 if (rc != ECORE_SUCCESS) {
784                         DP_ERR(edev, "Failed to configure accept-any-vlan\n");
785                         return;
786                 }
787         }
788
789         DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
790 }
791
792 static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
793 {
794         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
795         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
796         struct ecore_sp_vport_update_params params;
797         struct ecore_hwfn *p_hwfn;
798         uint8_t i;
799         int rc;
800
801         memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
802         params.vport_id = 0;
803         params.update_inner_vlan_removal_flg = 1;
804         params.inner_vlan_removal_flg = flg;
805         for_each_hwfn(edev, i) {
806                 p_hwfn = &edev->hwfns[i];
807                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
808                 rc = ecore_sp_vport_update(p_hwfn, &params,
809                                 ECORE_SPQ_MODE_EBLOCK, NULL);
810                 if (rc != ECORE_SUCCESS) {
811                         DP_ERR(edev, "Failed to update vport\n");
812                         return -1;
813                 }
814         }
815
816         DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
817         return 0;
818 }
819
820 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
821                                 uint16_t vlan_id, int on)
822 {
823         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
824         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
825         struct qed_dev_eth_info *dev_info = &qdev->dev_info;
826         struct qede_vlan_entry *tmp = NULL;
827         struct qede_vlan_entry *vlan;
828         struct ecore_filter_ucast ucast;
829         int rc;
830
831         if (on) {
832                 if (qdev->configured_vlans == dev_info->num_vlan_filters) {
833                         DP_ERR(edev, "Reached max VLAN filter limit"
834                                       ", enabling accept_any_vlan\n");
835                         qede_config_accept_any_vlan(qdev, true);
836                         return 0;
837                 }
838
839                 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
840                         if (tmp->vid == vlan_id) {
841                                 DP_INFO(edev, "VLAN %u already configured\n",
842                                         vlan_id);
843                                 return 0;
844                         }
845                 }
846
847                 vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
848                                   RTE_CACHE_LINE_SIZE);
849
850                 if (!vlan) {
851                         DP_ERR(edev, "Did not allocate memory for VLAN\n");
852                         return -ENOMEM;
853                 }
854
855                 qede_set_ucast_cmn_params(&ucast);
856                 ucast.opcode = ECORE_FILTER_ADD;
857                 ucast.type = ECORE_FILTER_VLAN;
858                 ucast.vlan = vlan_id;
859                 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
860                                             NULL);
861                 if (rc != 0) {
862                         DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
863                                rc);
864                         rte_free(vlan);
865                 } else {
866                         vlan->vid = vlan_id;
867                         SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
868                         qdev->configured_vlans++;
869                         DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
870                                 vlan_id, qdev->configured_vlans);
871                 }
872         } else {
873                 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
874                         if (tmp->vid == vlan_id)
875                                 break;
876                 }
877
878                 if (!tmp) {
879                         if (qdev->configured_vlans == 0) {
880                                 DP_INFO(edev,
881                                         "No VLAN filters configured yet\n");
882                                 return 0;
883                         }
884
885                         DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
886                         return -EINVAL;
887                 }
888
889                 SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);
890
891                 qede_set_ucast_cmn_params(&ucast);
892                 ucast.opcode = ECORE_FILTER_REMOVE;
893                 ucast.type = ECORE_FILTER_VLAN;
894                 ucast.vlan = vlan_id;
895                 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
896                                             NULL);
897                 if (rc != 0) {
898                         DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
899                                vlan_id, rc);
900                 } else {
901                         qdev->configured_vlans--;
902                         DP_INFO(edev, "VLAN %u removed configured_vlans %u\n",
903                                 vlan_id, qdev->configured_vlans);
904                 }
905         }
906
907         return rc;
908 }
909
910 static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
911 {
912         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
913         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
914         uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
915
916         if (mask & ETH_VLAN_STRIP_MASK) {
917                 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
918                         (void)qede_vlan_stripping(eth_dev, 1);
919                 else
920                         (void)qede_vlan_stripping(eth_dev, 0);
921         }
922
923         if (mask & ETH_VLAN_FILTER_MASK) {
924                 /* VLAN filtering kicks in when a VLAN is added */
925                 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
926                         qede_vlan_filter_set(eth_dev, 0, 1);
927                 } else {
928                         if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
929                                 DP_ERR(edev,
930                                   " Please remove existing VLAN filters"
931                                   " before disabling VLAN filtering\n");
932                                 /* Signal app that VLAN filtering is still
933                                  * enabled
934                                  */
935                                 eth_dev->data->dev_conf.rxmode.offloads |=
936                                                 DEV_RX_OFFLOAD_VLAN_FILTER;
937                         } else {
938                                 qede_vlan_filter_set(eth_dev, 0, 0);
939                         }
940                 }
941         }
942
943         if (mask & ETH_VLAN_EXTEND_MASK)
944                 DP_ERR(edev, "Extend VLAN not supported\n");
945
946         qdev->vlan_offload_mask = mask;
947
948         DP_INFO(edev, "VLAN offload mask %d\n", mask);
949
950         return 0;
951 }
952
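/* Generate a pseudo-random RSS hash key.  The key only needs to be
 * reasonably well distributed, not cryptographically strong, so seeding
 * rand() with the current time is sufficient here.
 */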
953 static void qede_prandom_bytes(uint32_t *buff)
954 {
955         uint8_t i;
956
957         srand((unsigned int)time(NULL));
958         for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
959                 buff[i] = rand();
960 }
961
962 int qede_config_rss(struct rte_eth_dev *eth_dev)
963 {
964         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
965         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
966         uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
967         struct rte_eth_rss_reta_entry64 reta_conf[2];
968         struct rte_eth_rss_conf rss_conf;
969         uint32_t i, id, pos, q;
970
971         rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
972         if (!rss_conf.rss_key) {
973                 DP_INFO(edev, "Applying driver default key\n");
974                 rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
975                 qede_prandom_bytes(&def_rss_key[0]);
976                 rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
977         }
978
979         /* Configure RSS hash */
980         if (qede_rss_hash_update(eth_dev, &rss_conf))
981                 return -EINVAL;
982
983         /* Configure default RETA */
984         memset(reta_conf, 0, sizeof(reta_conf));
985         for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
986                 reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
987
988         for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
989                 id = i / RTE_RETA_GROUP_SIZE;
990                 pos = i % RTE_RETA_GROUP_SIZE;
991                 q = i % QEDE_RSS_COUNT(qdev);
992                 reta_conf[id].reta[pos] = q;
993         }
994         if (qede_rss_reta_update(eth_dev, &reta_conf[0],
995                                  ECORE_RSS_IND_TABLE_SIZE))
996                 return -EINVAL;
997
998         return 0;
999 }
1000
1001 static void qede_fastpath_start(struct ecore_dev *edev)
1002 {
1003         struct ecore_hwfn *p_hwfn;
1004         int i;
1005
1006         for_each_hwfn(edev, i) {
1007                 p_hwfn = &edev->hwfns[i];
1008                 ecore_hw_start_fastpath(p_hwfn);
1009         }
1010 }
1011
1012 static int qede_dev_start(struct rte_eth_dev *eth_dev)
1013 {
1014         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1015         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1016         struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
1017
1018         PMD_INIT_FUNC_TRACE(edev);
1019
1020         /* Update MTU only if it has changed */
1021         if (eth_dev->data->mtu != qdev->mtu) {
1022                 if (qede_update_mtu(eth_dev, qdev->mtu))
1023                         goto err;
1024         }
1025
1026         /* Configure TPA parameters */
1027         if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1028                 if (qede_enable_tpa(eth_dev, true))
1029                         return -EINVAL;
1030                 /* Enable scatter mode for LRO */
1031                 if (!eth_dev->data->scattered_rx)
1032                         rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
1033         }
1034
1035         /* Start queues */
1036         if (qede_start_queues(eth_dev))
1037                 goto err;
1038
1039         if (IS_PF(edev))
1040                 qede_reset_queue_stats(qdev, true);
1041
1042         /* Newer SR-IOV PF drivers expect RX/TX queues to be started before
1043          * enabling RSS, hence RSS configuration is deferred up to this point.
1044          * We would like to retain similar behavior in the PF case as well, so
1045          * no PF/VF-specific check is done here.
1046          */
1047         if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
1048                 if (qede_config_rss(eth_dev))
1049                         goto err;
1050
1051         /* Enable vport */
1052         if (qede_activate_vport(eth_dev, true))
1053                 goto err;
1054
1055         /* Update link status */
1056         qede_link_update(eth_dev, 0);
1057
1058         /* Start/resume traffic */
1059         qede_fastpath_start(edev);
1060
1061         DP_INFO(edev, "Device started\n");
1062
1063         return 0;
1064 err:
1065         DP_ERR(edev, "Device start fails\n");
1066         return -1; /* common error code is < 0 */
1067 }
1068
1069 static void qede_dev_stop(struct rte_eth_dev *eth_dev)
1070 {
1071         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1072         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1073
1074         PMD_INIT_FUNC_TRACE(edev);
1075
1076         /* Disable vport */
1077         if (qede_activate_vport(eth_dev, false))
1078                 return;
1079
1080         if (qdev->enable_lro)
1081                 qede_enable_tpa(eth_dev, false);
1082
1083         /* Stop queues */
1084         qede_stop_queues(eth_dev);
1085
1086         /* Disable traffic */
1087         ecore_hw_stop_fastpath(edev); /* TBD - loop */
1088
1089         DP_INFO(edev, "Device is stopped\n");
1090 }
1091
1092 const char *valid_args[] = {
1093         QEDE_NPAR_TX_SWITCHING,
1094         QEDE_VF_TX_SWITCHING,
1095         NULL,
1096 };
1097
1098 static int qede_args_check(const char *key, const char *val, void *opaque)
1099 {
1100         unsigned long tmp;
1101         int ret = 0;
1102         struct rte_eth_dev *eth_dev = opaque;
1103         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1104         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1105
1106         errno = 0;
1107         tmp = strtoul(val, NULL, 0);
1108         if (errno) {
1109                 DP_INFO(edev, "%s: \"%s\" is not a valid integer\n", key, val);
1110                 return errno;
1111         }
1112
1113         if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) ||
1114             ((strcmp(QEDE_VF_TX_SWITCHING, key) == 0) && IS_VF(edev))) {
1115                 qdev->enable_tx_switching = !!tmp;
1116                 DP_INFO(edev, "%s tx-switching %s\n",
1117                         strcmp(QEDE_NPAR_TX_SWITCHING, key) ? "VF" : "NPAR",
1118                         qdev->enable_tx_switching ? "enabled" : "disabled");
1119         }
1120
1121         return ret;
1122 }
1123
1124 static int qede_args(struct rte_eth_dev *eth_dev)
1125 {
1126         struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
1127         struct rte_kvargs *kvlist;
1128         struct rte_devargs *devargs;
1129         int ret;
1130         int i;
1131
1132         devargs = pci_dev->device.devargs;
1133         if (!devargs)
1134                 return 0; /* return success */
1135
1136         kvlist = rte_kvargs_parse(devargs->args, valid_args);
1137         if (kvlist == NULL)
1138                 return -EINVAL;
1139
1140          /* Process parameters. */
1141         for (i = 0; (valid_args[i] != NULL); ++i) {
1142                 if (rte_kvargs_count(kvlist, valid_args[i])) {
1143                         ret = rte_kvargs_process(kvlist, valid_args[i],
1144                                                  qede_args_check, eth_dev);
1145                         if (ret != ECORE_SUCCESS) {
1146                                 rte_kvargs_free(kvlist);
1147                                 return ret;
1148                         }
1149                 }
1150         }
1151         rte_kvargs_free(kvlist);
1152
1153         return 0;
1154 }
1155
1156 static int qede_dev_configure(struct rte_eth_dev *eth_dev)
1157 {
1158         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1159         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1160         struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
1161         int ret;
1162
1163         PMD_INIT_FUNC_TRACE(edev);
1164
1165         /* Check requirements for 100G mode */
1166         if (ECORE_IS_CMT(edev)) {
1167                 if (eth_dev->data->nb_rx_queues < 2 ||
1168                     eth_dev->data->nb_tx_queues < 2) {
1169                         DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
1170                         return -EINVAL;
1171                 }
1172
1173                 if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
1174                     (eth_dev->data->nb_tx_queues % 2 != 0)) {
1175                         DP_ERR(edev,
1176                                "100G mode needs even no. of RX/TX queues\n");
1177                         return -EINVAL;
1178                 }
1179         }
1180
1181         /* We need at least one RX queue. There is no minimum-count check in
1182          * rte_eth_dev_configure(), so we check it here.
1183          */
1184         if (eth_dev->data->nb_rx_queues == 0) {
1185                 DP_ERR(edev, "Minimum one RX queue is required\n");
1186                 return -EINVAL;
1187         }
1188
1189         /* Enable Tx switching by default */
1190         qdev->enable_tx_switching = 1;
1191
1192         /* Parse devargs and fix up rxmode */
1193         if (qede_args(eth_dev))
1194                 DP_NOTICE(edev, false,
1195                           "Invalid devargs supplied, requested change will not take effect\n");
1196
1197         if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
1198               rxmode->mq_mode == ETH_MQ_RX_RSS)) {
1199                 DP_ERR(edev, "Unsupported multi-queue mode\n");
1200                 return -ENOTSUP;
1201         }
1202         /* Flow director mode check */
1203         if (qede_check_fdir_support(eth_dev))
1204                 return -ENOTSUP;
1205
1206         qede_dealloc_fp_resc(eth_dev);
1207         qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
1208         qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
1209         if (qede_alloc_fp_resc(qdev))
1210                 return -ENOMEM;
1211
1212         /* If jumbo enabled adjust MTU */
1213         if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
1214                 eth_dev->data->mtu =
1215                         eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
1216                         ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;
1217
1218         if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
1219                 eth_dev->data->scattered_rx = 1;
1220
1221         if (qede_start_vport(qdev, eth_dev->data->mtu))
1222                 return -1;
1223
1224         qdev->mtu = eth_dev->data->mtu;
1225
1226         /* Enable VLAN offloads by default */
1227         ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
1228                                              ETH_VLAN_FILTER_MASK);
1229         if (ret)
1230                 return ret;
1231
1232         DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
1233                         QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));
1234
1235         return 0;
1236 }
1237
1238 /* Info about HW descriptor ring limitations */
1239 static const struct rte_eth_desc_lim qede_rx_desc_lim = {
1240         .nb_max = 0x8000, /* 32K */
1241         .nb_min = 128,
1242         .nb_align = 128 /* lowest common multiple */
1243 };
1244
1245 static const struct rte_eth_desc_lim qede_tx_desc_lim = {
1246         .nb_max = 0x8000, /* 32K */
1247         .nb_min = 256,
1248         .nb_align = 256,
1249         .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
1250         .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
1251 };
1252
1253 static void
1254 qede_dev_info_get(struct rte_eth_dev *eth_dev,
1255                   struct rte_eth_dev_info *dev_info)
1256 {
1257         struct qede_dev *qdev = eth_dev->data->dev_private;
1258         struct ecore_dev *edev = &qdev->edev;
1259         struct qed_link_output link;
1260         uint32_t speed_cap = 0;
1261
1262         PMD_INIT_FUNC_TRACE(edev);
1263
1264         dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
1265         dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
1266         dev_info->rx_desc_lim = qede_rx_desc_lim;
1267         dev_info->tx_desc_lim = qede_tx_desc_lim;
1268
1269         if (IS_PF(edev))
1270                 dev_info->max_rx_queues = (uint16_t)RTE_MIN(
1271                         QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
1272         else
1273                 dev_info->max_rx_queues = (uint16_t)RTE_MIN(
1274                         QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
1275         dev_info->max_tx_queues = dev_info->max_rx_queues;
1276
1277         dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
1278         dev_info->max_vfs = 0;
1279         dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
1280         dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
1281         dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
1282         dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM  |
1283                                      DEV_RX_OFFLOAD_UDP_CKSUM   |
1284                                      DEV_RX_OFFLOAD_TCP_CKSUM   |
1285                                      DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
1286                                      DEV_RX_OFFLOAD_TCP_LRO     |
1287                                      DEV_RX_OFFLOAD_KEEP_CRC    |
1288                                      DEV_RX_OFFLOAD_SCATTER     |
1289                                      DEV_RX_OFFLOAD_JUMBO_FRAME |
1290                                      DEV_RX_OFFLOAD_VLAN_FILTER |
1291                                      DEV_RX_OFFLOAD_VLAN_STRIP);
1292         dev_info->rx_queue_offload_capa = 0;
1293
1294         /* TX offloads are applied on a per-packet basis, so they are
1295          * applicable at both the port and queue levels.
1296          */
1297         dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
1298                                      DEV_TX_OFFLOAD_IPV4_CKSUM  |
1299                                      DEV_TX_OFFLOAD_UDP_CKSUM   |
1300                                      DEV_TX_OFFLOAD_TCP_CKSUM   |
1301                                      DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1302                                      DEV_TX_OFFLOAD_MULTI_SEGS  |
1303                                      DEV_TX_OFFLOAD_TCP_TSO     |
1304                                      DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1305                                      DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
1306         dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;
1307
1308         dev_info->default_txconf = (struct rte_eth_txconf) {
1309                 .offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
1310         };
1311
1312         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1313                 /* Packets are always dropped if no descriptors are available */
1314                 .rx_drop_en = 1,
1315                 .offloads = 0,
1316         };
1317
1318         memset(&link, 0, sizeof(struct qed_link_output));
1319         qdev->ops->common->get_link(edev, &link);
1320         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1321                 speed_cap |= ETH_LINK_SPEED_1G;
1322         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1323                 speed_cap |= ETH_LINK_SPEED_10G;
1324         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1325                 speed_cap |= ETH_LINK_SPEED_25G;
1326         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1327                 speed_cap |= ETH_LINK_SPEED_40G;
1328         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1329                 speed_cap |= ETH_LINK_SPEED_50G;
1330         if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1331                 speed_cap |= ETH_LINK_SPEED_100G;
1332         dev_info->speed_capa = speed_cap;
1333 }
1334
1335 /* return 0 means link status changed, -1 means not changed */
1336 int
1337 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
1338 {
1339         struct qede_dev *qdev = eth_dev->data->dev_private;
1340         struct ecore_dev *edev = &qdev->edev;
1341         struct qed_link_output q_link;
1342         struct rte_eth_link link;
1343         uint16_t link_duplex;
1344
1345         memset(&q_link, 0, sizeof(q_link));
1346         memset(&link, 0, sizeof(link));
1347
1348         qdev->ops->common->get_link(edev, &q_link);
1349
1350         /* Link Speed */
1351         link.link_speed = q_link.speed;
1352
1353         /* Link Mode */
1354         switch (q_link.duplex) {
1355         case QEDE_DUPLEX_HALF:
1356                 link_duplex = ETH_LINK_HALF_DUPLEX;
1357                 break;
1358         case QEDE_DUPLEX_FULL:
1359                 link_duplex = ETH_LINK_FULL_DUPLEX;
1360                 break;
1361         case QEDE_DUPLEX_UNKNOWN:
1362         default:
1363                 link_duplex = -1;
1364         }
1365         link.link_duplex = link_duplex;
1366
1367         /* Link Status */
1368         link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
1369
1370         /* AN */
1371         link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
1372                              ETH_LINK_AUTONEG : ETH_LINK_FIXED;
1373
1374         DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
1375                 link.link_speed, link.link_duplex,
1376                 link.link_autoneg, link.link_status);
1377
1378         return rte_eth_linkstatus_set(eth_dev, &link);
1379 }
1380
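/* Enable promiscuous Rx mode. If allmulticast is already enabled on the
 * port, the multicast-promisc mode is kept as well so both stay active.
 */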
1381 static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
1382 {
1383 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
1384         struct qede_dev *qdev = eth_dev->data->dev_private;
1385         struct ecore_dev *edev = &qdev->edev;
1386
1387         PMD_INIT_FUNC_TRACE(edev);
1388 #endif
1389
1390         enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
1391
1392         if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
1393                 type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1394
1395         qed_configure_filter_rx_mode(eth_dev, type);
1396 }
1397
1398 static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
1399 {
1400 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
1401         struct qede_dev *qdev = eth_dev->data->dev_private;
1402         struct ecore_dev *edev = &qdev->edev;
1403
1404         PMD_INIT_FUNC_TRACE(edev);
1405 #endif
1406
1407         if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
1408                 qed_configure_filter_rx_mode(eth_dev,
1409                                 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
1410         else
1411                 qed_configure_filter_rx_mode(eth_dev,
1412                                 QED_FILTER_RX_MODE_TYPE_REGULAR);
1413 }
1414
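/* Periodic slowpath status-block poller used on CMT (dual-hwfn) devices,
 * where a single interrupt vector cannot cover both engines: it services
 * both hwfns and re-arms itself via an EAL alarm every QEDE_SP_TIMER_PERIOD.
 */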
1415 static void qede_poll_sp_sb_cb(void *param)
1416 {
1417         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1418         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1419         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1420         int rc;
1421
1422         qede_interrupt_action(ECORE_LEADING_HWFN(edev));
1423         qede_interrupt_action(&edev->hwfns[1]);
1424
1425         rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
1426                                qede_poll_sp_sb_cb,
1427                                (void *)eth_dev);
1428         if (rc != 0) {
1429                 DP_ERR(edev, "Unable to start periodic"
1430                              " timer rc %d\n", rc);
1431                 assert(false && "Unable to start periodic timer");
1432         }
1433 }
1434
1435 static void qede_dev_close(struct rte_eth_dev *eth_dev)
1436 {
1437         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1438         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1439         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1440
1441         PMD_INIT_FUNC_TRACE(edev);
1442
1443         /* dev_stop() shall clean up fp resources in hw but without releasing
1444          * dma memories and sw structures so that dev_start() can be called
1445          * by the app without reconfiguration. However, in dev_close() we
1446          * can release all the resources and the device can be brought up anew.
1447          */
1448         if (eth_dev->data->dev_started)
1449                 qede_dev_stop(eth_dev);
1450
1451         qede_stop_vport(edev);
1452         qdev->vport_started = false;
1453         qede_fdir_dealloc_resc(eth_dev);
1454         qede_dealloc_fp_resc(eth_dev);
1455
1456         eth_dev->data->nb_rx_queues = 0;
1457         eth_dev->data->nb_tx_queues = 0;
1458
1459         /* Bring the link down */
1460         qede_dev_set_link_state(eth_dev, false);
1461         qdev->ops->common->slowpath_stop(edev);
1462         qdev->ops->common->remove(edev);
1463         rte_intr_disable(&pci_dev->intr_handle);
1464
1465         switch (pci_dev->intr_handle.type) {
1466         case RTE_INTR_HANDLE_UIO_INTX:
1467         case RTE_INTR_HANDLE_VFIO_LEGACY:
1468                 rte_intr_callback_unregister(&pci_dev->intr_handle,
1469                                              qede_interrupt_handler_intx,
1470                                              (void *)eth_dev);
1471                 break;
1472         default:
1473                 rte_intr_callback_unregister(&pci_dev->intr_handle,
1474                                            qede_interrupt_handler,
1475                                            (void *)eth_dev);
1476         }
1477
1478         if (ECORE_IS_CMT(edev))
1479                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
1480 }
1481
1482 static int
1483 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
1484 {
1485         struct qede_dev *qdev = eth_dev->data->dev_private;
1486         struct ecore_dev *edev = &qdev->edev;
1487         struct ecore_eth_stats stats;
1488         unsigned int i = 0, j = 0, qid;
1489         unsigned int rxq_stat_cntrs, txq_stat_cntrs;
1490         struct qede_tx_queue *txq;
1491
1492         ecore_get_vport_stats(edev, &stats);
1493
1494         /* RX Stats */
1495         eth_stats->ipackets = stats.common.rx_ucast_pkts +
1496             stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;
1497
1498         eth_stats->ibytes = stats.common.rx_ucast_bytes +
1499             stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;
1500
1501         eth_stats->ierrors = stats.common.rx_crc_errors +
1502             stats.common.rx_align_errors +
1503             stats.common.rx_carrier_errors +
1504             stats.common.rx_oversize_packets +
1505             stats.common.rx_jabbers + stats.common.rx_undersize_packets;
1506
1507         eth_stats->rx_nombuf = stats.common.no_buff_discards;
1508
1509         eth_stats->imissed = stats.common.mftag_filter_discards +
1510             stats.common.mac_filter_discards +
1511             stats.common.no_buff_discards +
1512             stats.common.brb_truncates + stats.common.brb_discards;
1513
1514         /* TX stats */
1515         eth_stats->opackets = stats.common.tx_ucast_pkts +
1516             stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;
1517
1518         eth_stats->obytes = stats.common.tx_ucast_bytes +
1519             stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;
1520
1521         eth_stats->oerrors = stats.common.tx_err_drop_pkts;
1522
1523         /* Queue stats */
1524         rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1525                                RTE_ETHDEV_QUEUE_STAT_CNTRS);
1526         txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
1527                                RTE_ETHDEV_QUEUE_STAT_CNTRS);
1528         if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
1529             (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
1530                 DP_VERBOSE(edev, ECORE_MSG_DEBUG,
1531                        "Not all the queue stats will be displayed. Set"
1532                        " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
1533                        " appropriately and retry.\n");
1534
1535         for_each_rss(qid) {
1536                 eth_stats->q_ipackets[i] =
1537                         *(uint64_t *)(
1538                                 ((char *)(qdev->fp_array[qid].rxq)) +
1539                                 offsetof(struct qede_rx_queue,
1540                                 rcv_pkts));
1541                 eth_stats->q_errors[i] =
1542                         *(uint64_t *)(
1543                                 ((char *)(qdev->fp_array[qid].rxq)) +
1544                                 offsetof(struct qede_rx_queue,
1545                                 rx_hw_errors)) +
1546                         *(uint64_t *)(
1547                                 ((char *)(qdev->fp_array[qid].rxq)) +
1548                                 offsetof(struct qede_rx_queue,
1549                                 rx_alloc_errors));
1550                 i++;
1551                 if (i == rxq_stat_cntrs)
1552                         break;
1553         }
1554
1555         for_each_tss(qid) {
1556                 txq = qdev->fp_array[qid].txq;
1557                 eth_stats->q_opackets[j] =
1558                         *((uint64_t *)(uintptr_t)
1559                                 (((uint64_t)(uintptr_t)(txq)) +
1560                                  offsetof(struct qede_tx_queue,
1561                                           xmit_pkts)));
1562                 j++;
1563                 if (j == txq_stat_cntrs)
1564                         break;
1565         }
1566
1567         return 0;
1568 }
1569
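/* Total number of extended stats: common stats plus chip-specific (BB or
 * AH) stats plus per-Rx-queue stats, with the per-queue part capped at
 * RTE_ETHDEV_QUEUE_STAT_CNTRS.
 */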
1570 static unsigned
1571 qede_get_xstats_count(struct qede_dev *qdev) {
1572         if (ECORE_IS_BB(&qdev->edev))
1573                 return RTE_DIM(qede_xstats_strings) +
1574                        RTE_DIM(qede_bb_xstats_strings) +
1575                        (RTE_DIM(qede_rxq_xstats_strings) *
1576                         RTE_MIN(QEDE_RSS_COUNT(qdev),
1577                                 RTE_ETHDEV_QUEUE_STAT_CNTRS));
1578         else
1579                 return RTE_DIM(qede_xstats_strings) +
1580                        RTE_DIM(qede_ah_xstats_strings) +
1581                        (RTE_DIM(qede_rxq_xstats_strings) *
1582                         RTE_MIN(QEDE_RSS_COUNT(qdev),
1583                                 RTE_ETHDEV_QUEUE_STAT_CNTRS));
1584 }
1585
1586 static int
1587 qede_get_xstats_names(struct rte_eth_dev *dev,
1588                       struct rte_eth_xstat_name *xstats_names,
1589                       __rte_unused unsigned int limit)
1590 {
1591         struct qede_dev *qdev = dev->data->dev_private;
1592         struct ecore_dev *edev = &qdev->edev;
1593         const unsigned int stat_cnt = qede_get_xstats_count(qdev);
1594         unsigned int i, qid, stat_idx = 0;
1595         unsigned int rxq_stat_cntrs;
1596
1597         if (xstats_names != NULL) {
1598                 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
1599                         snprintf(xstats_names[stat_idx].name,
1600                                 sizeof(xstats_names[stat_idx].name),
1601                                 "%s",
1602                                 qede_xstats_strings[i].name);
1603                         stat_idx++;
1604                 }
1605
1606                 if (ECORE_IS_BB(edev)) {
1607                         for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
1608                                 snprintf(xstats_names[stat_idx].name,
1609                                         sizeof(xstats_names[stat_idx].name),
1610                                         "%s",
1611                                         qede_bb_xstats_strings[i].name);
1612                                 stat_idx++;
1613                         }
1614                 } else {
1615                         for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
1616                                 snprintf(xstats_names[stat_idx].name,
1617                                         sizeof(xstats_names[stat_idx].name),
1618                                         "%s",
1619                                         qede_ah_xstats_strings[i].name);
1620                                 stat_idx++;
1621                         }
1622                 }
1623
1624                 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1625                                          RTE_ETHDEV_QUEUE_STAT_CNTRS);
1626                 for (qid = 0; qid < rxq_stat_cntrs; qid++) {
1627                         for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
1628                                 snprintf(xstats_names[stat_idx].name,
1629                                         sizeof(xstats_names[stat_idx].name),
1630                                         "%.4s%d%s",
1631                                         qede_rxq_xstats_strings[i].name, qid,
1632                                         qede_rxq_xstats_strings[i].name + 4);
1633                                 stat_idx++;
1634                         }
1635                 }
1636         }
1637
1638         return stat_cnt;
1639 }
1640
1641 static int
1642 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1643                 unsigned int n)
1644 {
1645         struct qede_dev *qdev = dev->data->dev_private;
1646         struct ecore_dev *edev = &qdev->edev;
1647         struct ecore_eth_stats stats;
1648         const unsigned int num = qede_get_xstats_count(qdev);
1649         unsigned int i, qid, stat_idx = 0;
1650         unsigned int rxq_stat_cntrs;
1651
1652         if (n < num)
1653                 return num;
1654
1655         ecore_get_vport_stats(edev, &stats);
1656
1657         for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
1658                 xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
1659                                              qede_xstats_strings[i].offset);
1660                 xstats[stat_idx].id = stat_idx;
1661                 stat_idx++;
1662         }
1663
1664         if (ECORE_IS_BB(edev)) {
1665                 for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
1666                         xstats[stat_idx].value =
1667                                         *(uint64_t *)(((char *)&stats) +
1668                                         qede_bb_xstats_strings[i].offset);
1669                         xstats[stat_idx].id = stat_idx;
1670                         stat_idx++;
1671                 }
1672         } else {
1673                 for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
1674                         xstats[stat_idx].value =
1675                                         *(uint64_t *)(((char *)&stats) +
1676                                         qede_ah_xstats_strings[i].offset);
1677                         xstats[stat_idx].id = stat_idx;
1678                         stat_idx++;
1679                 }
1680         }
1681
1682         rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
1683                                  RTE_ETHDEV_QUEUE_STAT_CNTRS);
1684         for (qid = 0; qid < rxq_stat_cntrs; qid++) {
1685                 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
1686                         xstats[stat_idx].value = *(uint64_t *)(
1687                                 ((char *)(qdev->fp_array[qid].rxq)) +
1688                                  qede_rxq_xstats_strings[i].offset);
1689                         xstats[stat_idx].id = stat_idx;
1690                         stat_idx++;
1691                 }
1692         }
1695
1696         return stat_idx;
1697 }
1698
1699 static void
1700 qede_reset_xstats(struct rte_eth_dev *dev)
1701 {
1702         struct qede_dev *qdev = dev->data->dev_private;
1703         struct ecore_dev *edev = &qdev->edev;
1704
1705         ecore_reset_vport_stats(edev);
1706         qede_reset_queue_stats(qdev, true);
1707 }
1708
1709 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
1710 {
1711         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1712         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1713         struct qed_link_params link_params;
1714         int rc;
1715
1716         DP_INFO(edev, "setting link state %d\n", link_up);
1717         memset(&link_params, 0, sizeof(link_params));
1718         link_params.link_up = link_up;
1719         rc = qdev->ops->common->set_link(edev, &link_params);
1720         if (rc != ECORE_SUCCESS)
1721                 DP_ERR(edev, "Unable to set link state %d\n", link_up);
1722
1723         return rc;
1724 }
1725
1726 static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
1727 {
1728         return qede_dev_set_link_state(eth_dev, true);
1729 }
1730
1731 static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
1732 {
1733         return qede_dev_set_link_state(eth_dev, false);
1734 }
1735
1736 static void qede_reset_stats(struct rte_eth_dev *eth_dev)
1737 {
1738         struct qede_dev *qdev = eth_dev->data->dev_private;
1739         struct ecore_dev *edev = &qdev->edev;
1740
1741         ecore_reset_vport_stats(edev);
1742         qede_reset_queue_stats(qdev, false);
1743 }
1744
1745 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
1746 {
1747         enum qed_filter_rx_mode_type type =
1748             QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1749
1750         if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1751                 type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
1752
1753         qed_configure_filter_rx_mode(eth_dev, type);
1754 }
1755
1756 static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
1757 {
1758         if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
1759                 qed_configure_filter_rx_mode(eth_dev,
1760                                 QED_FILTER_RX_MODE_TYPE_PROMISC);
1761         else
1762                 qed_configure_filter_rx_mode(eth_dev,
1763                                 QED_FILTER_RX_MODE_TYPE_REGULAR);
1764 }
1765
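/* Replace the multicast filter list: validate the addresses, flush the
 * currently programmed entries and then program the new list. At most
 * ECORE_MAX_MC_ADDRS entries are accepted.
 */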
1766 static int
1767 qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
1768                       uint32_t mc_addrs_num)
1769 {
1770         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1771         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1772         uint8_t i;
1773
1774         if (mc_addrs_num > ECORE_MAX_MC_ADDRS) {
1775                 DP_ERR(edev, "Reached max multicast filters limit,"
1776                              "Please enable multicast promisc mode\n");
1777                 return -ENOSPC;
1778         }
1779
1780         for (i = 0; i < mc_addrs_num; i++) {
1781                 if (!is_multicast_ether_addr(&mc_addrs[i])) {
1782                         DP_ERR(edev, "Not a valid multicast MAC\n");
1783                         return -EINVAL;
1784                 }
1785         }
1786
1787         /* Flush all existing entries */
1788         if (qede_del_mcast_filters(eth_dev))
1789                 return -1;
1790
1791         /* Set new mcast list */
1792         return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num);
1793 }
1794
1795 /* Update MTU via vport-update without doing port restart.
1796  * The vport must be deactivated before calling this API.
1797  */
1798 int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
1799 {
1800         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1801         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1802         struct ecore_hwfn *p_hwfn;
1803         int rc;
1804         int i;
1805
1806         if (IS_PF(edev)) {
1807                 struct ecore_sp_vport_update_params params;
1808
1809                 memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
1810                 params.vport_id = 0;
1811                 params.mtu = mtu;
1813                 for_each_hwfn(edev, i) {
1814                         p_hwfn = &edev->hwfns[i];
1815                         params.opaque_fid = p_hwfn->hw_info.opaque_fid;
1816                         rc = ecore_sp_vport_update(p_hwfn, &params,
1817                                         ECORE_SPQ_MODE_EBLOCK, NULL);
1818                         if (rc != ECORE_SUCCESS)
1819                                 goto err;
1820                 }
1821         } else {
1822                 for_each_hwfn(edev, i) {
1823                         p_hwfn = &edev->hwfns[i];
1824                         rc = ecore_vf_pf_update_mtu(p_hwfn, mtu);
1825                         if (rc == ECORE_INVAL) {
1826                                 DP_INFO(edev, "VF MTU Update TLV not supported\n");
1827                                 /* Recreate vport */
1828                                 rc = qede_start_vport(qdev, mtu);
1829                                 if (rc != ECORE_SUCCESS)
1830                                         goto err;
1831
1832                                 /* Restore config lost due to vport stop */
1833                                 if (eth_dev->data->promiscuous)
1834                                         qede_promiscuous_enable(eth_dev);
1835                                 else
1836                                         qede_promiscuous_disable(eth_dev);
1837
1838                                 if (eth_dev->data->all_multicast)
1839                                         qede_allmulticast_enable(eth_dev);
1840                                 else
1841                                         qede_allmulticast_disable(eth_dev);
1842
1843                                 qede_vlan_offload_set(eth_dev,
1844                                                       qdev->vlan_offload_mask);
1845                         } else if (rc != ECORE_SUCCESS) {
1846                                 goto err;
1847                         }
1848                 }
1849         }
1850         DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? "PF" : "VF", mtu);
1851
1852         return 0;
1853
1854 err:
1855         DP_ERR(edev, "Failed to update MTU\n");
1856         return -1;
1857 }
1858
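/* Apply the requested flow control (pause) configuration by overriding the
 * pause bits in the link parameters; pause autoneg is only allowed when the
 * current link reports autoneg support.
 */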
1859 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
1860                               struct rte_eth_fc_conf *fc_conf)
1861 {
1862         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1863         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1864         struct qed_link_output current_link;
1865         struct qed_link_params params;
1866
1867         memset(&current_link, 0, sizeof(current_link));
1868         qdev->ops->common->get_link(edev, &current_link);
1869
1870         memset(&params, 0, sizeof(params));
1871         params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
1872         if (fc_conf->autoneg) {
1873                 if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
1874                         DP_ERR(edev, "Autoneg not supported\n");
1875                         return -EINVAL;
1876                 }
1877                 params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
1878         }
1879
1880         /* Pause is assumed to be supported (SUPPORTED_Pause) */
1881         if (fc_conf->mode == RTE_FC_FULL)
1882                 params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
1883                                         QED_LINK_PAUSE_RX_ENABLE);
1884         if (fc_conf->mode == RTE_FC_TX_PAUSE)
1885                 params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
1886         if (fc_conf->mode == RTE_FC_RX_PAUSE)
1887                 params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
1888
1889         params.link_up = true;
1890         (void)qdev->ops->common->set_link(edev, &params);
1891
1892         return 0;
1893 }
1894
1895 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
1896                               struct rte_eth_fc_conf *fc_conf)
1897 {
1898         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1899         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1900         struct qed_link_output current_link;
1901
1902         memset(&current_link, 0, sizeof(current_link));
1903         qdev->ops->common->get_link(edev, &current_link);
1904
1905         if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
1906                 fc_conf->autoneg = true;
1907
1908         if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
1909                                          QED_LINK_PAUSE_TX_ENABLE))
1910                 fc_conf->mode = RTE_FC_FULL;
1911         else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
1912                 fc_conf->mode = RTE_FC_RX_PAUSE;
1913         else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
1914                 fc_conf->mode = RTE_FC_TX_PAUSE;
1915         else
1916                 fc_conf->mode = RTE_FC_NONE;
1917
1918         return 0;
1919 }
1920
1921 static const uint32_t *
1922 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
1923 {
1924         static const uint32_t ptypes[] = {
1925                 RTE_PTYPE_L2_ETHER,
1926                 RTE_PTYPE_L2_ETHER_VLAN,
1927                 RTE_PTYPE_L3_IPV4,
1928                 RTE_PTYPE_L3_IPV6,
1929                 RTE_PTYPE_L4_TCP,
1930                 RTE_PTYPE_L4_UDP,
1931                 RTE_PTYPE_TUNNEL_VXLAN,
1932                 RTE_PTYPE_L4_FRAG,
1933                 RTE_PTYPE_TUNNEL_GENEVE,
1934                 RTE_PTYPE_TUNNEL_GRE,
1935                 /* Inner */
1936                 RTE_PTYPE_INNER_L2_ETHER,
1937                 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1938                 RTE_PTYPE_INNER_L3_IPV4,
1939                 RTE_PTYPE_INNER_L3_IPV6,
1940                 RTE_PTYPE_INNER_L4_TCP,
1941                 RTE_PTYPE_INNER_L4_UDP,
1942                 RTE_PTYPE_INNER_L4_FRAG,
1943                 RTE_PTYPE_UNKNOWN
1944         };
1945
1946         if (eth_dev->rx_pkt_burst == qede_recv_pkts)
1947                 return ptypes;
1948
1949         return NULL;
1950 }
1951
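/* Translate DPDK ETH_RSS_* hash flags into ecore RSS capability bits. */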
1952 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
1953 {
1954         *rss_caps = 0;
1955         *rss_caps |= (hf & ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
1956         *rss_caps |= (hf & ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
1957         *rss_caps |= (hf & ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
1958         *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
1959         *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
1960         *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
1961         *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? ECORE_RSS_IPV4_UDP : 0;
1962         *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? ECORE_RSS_IPV6_UDP : 0;
1963 }
1964
1965 int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
1966                          struct rte_eth_rss_conf *rss_conf)
1967 {
1968         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
1969         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
1970         struct ecore_sp_vport_update_params vport_update_params;
1971         struct ecore_rss_params rss_params;
1972         struct ecore_hwfn *p_hwfn;
1973         uint32_t *key = (uint32_t *)rss_conf->rss_key;
1974         uint64_t hf = rss_conf->rss_hf;
1975         uint8_t len = rss_conf->rss_key_len;
1976         uint8_t idx;
1977         uint8_t i;
1978         int rc;
1979
1980         memset(&vport_update_params, 0, sizeof(vport_update_params));
1981         memset(&rss_params, 0, sizeof(rss_params));
1982
1983         DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
1984                 (unsigned long)hf, len, key);
1985
1986         if (hf != 0) {
1987                 /* Enabling RSS */
1988                 DP_INFO(edev, "Enabling rss\n");
1989
1990                 /* RSS caps */
1991                 qede_init_rss_caps(&rss_params.rss_caps, hf);
1992                 rss_params.update_rss_capabilities = 1;
1993
1994                 /* RSS hash key */
1995                 if (key) {
1996                         if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
1997                                 DP_ERR(edev, "RSS key length exceeds limit\n");
1998                                 return -EINVAL;
1999                         }
2000                         DP_INFO(edev, "Applying user supplied hash key\n");
2001                         rss_params.update_rss_key = 1;
2002                         memcpy(&rss_params.rss_key, key, len);
2003                 }
2004                 rss_params.rss_enable = 1;
2005         }
2006
2007         rss_params.update_rss_config = 1;
2008         /* tbl_size has to be set with capabilities */
2009         rss_params.rss_table_size_log = 7;
2010         vport_update_params.vport_id = 0;
2011         /* pass the L2 handles instead of qids */
2012         for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
2013                 idx = i % QEDE_RSS_COUNT(qdev);
2014                 rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
2015         }
2016         vport_update_params.rss_params = &rss_params;
2017
2018         for_each_hwfn(edev, i) {
2019                 p_hwfn = &edev->hwfns[i];
2020                 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2021                 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
2022                                            ECORE_SPQ_MODE_EBLOCK, NULL);
2023                 if (rc) {
2024                         DP_ERR(edev, "vport-update for RSS failed\n");
2025                         return rc;
2026                 }
2027         }
2028         qdev->rss_enable = rss_params.rss_enable;
2029
2030         /* Update local structure for hash query */
2031         qdev->rss_conf.rss_hf = hf;
2032         qdev->rss_conf.rss_key_len = len;
2033         if (qdev->rss_enable) {
2034                 if (qdev->rss_conf.rss_key == NULL) {
2035                         qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
2036                         if (qdev->rss_conf.rss_key == NULL) {
2037                                 DP_ERR(edev, "No memory to store RSS key\n");
2038                                 return -ENOMEM;
2039                         }
2040                 }
2041                 if (key && len) {
2042                         DP_INFO(edev, "Storing RSS key\n");
2043                         memcpy(qdev->rss_conf.rss_key, key, len);
2044                 }
2045         } else if (!qdev->rss_enable && len == 0) {
2046                 if (qdev->rss_conf.rss_key) {
2047                         free(qdev->rss_conf.rss_key);
2048                         qdev->rss_conf.rss_key = NULL;
2049                         DP_INFO(edev, "Free RSS key\n");
2050                 }
2051         }
2052
2053         return 0;
2054 }
2055
2056 static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
2057                            struct rte_eth_rss_conf *rss_conf)
2058 {
2059         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2060
2061         rss_conf->rss_hf = qdev->rss_conf.rss_hf;
2062         rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;
2063
2064         if (rss_conf->rss_key && qdev->rss_conf.rss_key)
2065                 memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
2066                        rss_conf->rss_key_len);
2067         return 0;
2068 }
2069
2070 static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
2071                                     struct ecore_rss_params *rss)
2072 {
2073         int i, fn;
2074         bool rss_mode = 1; /* enable */
2075         struct ecore_queue_cid *cid;
2076         struct ecore_rss_params *t_rss;
2077
2078         /* In the regular scenario, we'd simply need to take the input handlers.
2079          * But in CMT, we have to split the handlers according to the
2080          * engine they were configured on. We then have to check
2081          * whether RSS is really required, since two queues on CMT don't
2082          * require RSS.
2083          */
2084
2085         /* CMT should be round-robin */
2086         for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
2087                 cid = rss->rss_ind_table[i];
2088
2089                 if (cid->p_owner == ECORE_LEADING_HWFN(edev))
2090                         t_rss = &rss[0];
2091                 else
2092                         t_rss = &rss[1];
2093
2094                 t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
2095         }
2096
2097         t_rss = &rss[1];
2098         t_rss->update_rss_ind_table = 1;
2099         t_rss->rss_table_size_log = 7;
2100         t_rss->update_rss_config = 1;
2101
2102         /* Make sure RSS is actually required */
2103         for_each_hwfn(edev, fn) {
2104                 for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
2105                      i++) {
2106                         if (rss[fn].rss_ind_table[i] !=
2107                             rss[fn].rss_ind_table[0])
2108                                 break;
2109                 }
2110
2111                 if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
2112                         DP_INFO(edev,
2113                                 "CMT - 1 queue per-hwfn; Disabling RSS\n");
2114                         rss_mode = 0;
2115                         goto out;
2116                 }
2117         }
2118
2119 out:
2120         t_rss->rss_enable = rss_mode;
2121
2122         return rss_mode;
2123 }
2124
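/* Program the RSS redirection table. Entries are translated from queue ids
 * to ecore rxq handles; on CMT devices the table is additionally split per
 * engine by qede_update_rss_parm_cmt().
 */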
2125 int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
2126                          struct rte_eth_rss_reta_entry64 *reta_conf,
2127                          uint16_t reta_size)
2128 {
2129         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
2130         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2131         struct ecore_sp_vport_update_params vport_update_params;
2132         struct ecore_rss_params *params;
2133         struct ecore_hwfn *p_hwfn;
2134         uint16_t i, idx, shift;
2135         uint8_t entry;
2136         int rc = 0;
2137
2138         if (reta_size > ETH_RSS_RETA_SIZE_128) {
2139                 DP_ERR(edev, "reta_size %d is not supported by hardware\n",
2140                        reta_size);
2141                 return -EINVAL;
2142         }
2143
2144         memset(&vport_update_params, 0, sizeof(vport_update_params));
2145         params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
2146                              RTE_CACHE_LINE_SIZE);
2147         if (params == NULL) {
2148                 DP_ERR(edev, "failed to allocate memory\n");
2149                 return -ENOMEM;
2150         }
2151
2152         for (i = 0; i < reta_size; i++) {
2153                 idx = i / RTE_RETA_GROUP_SIZE;
2154                 shift = i % RTE_RETA_GROUP_SIZE;
2155                 if (reta_conf[idx].mask & (1ULL << shift)) {
2156                         entry = reta_conf[idx].reta[shift];
2157                         /* Pass rxq handles to ecore */
2158                         params->rss_ind_table[i] =
2159                                         qdev->fp_array[entry].rxq->handle;
2160                         /* Update the local copy for RETA query command */
2161                         qdev->rss_ind_table[i] = entry;
2162                 }
2163         }
2164
2165         params->update_rss_ind_table = 1;
2166         params->rss_table_size_log = 7;
2167         params->update_rss_config = 1;
2168
2169         /* Fix up RETA for CMT mode device */
2170         if (ECORE_IS_CMT(edev))
2171                 qdev->rss_enable = qede_update_rss_parm_cmt(edev,
2172                                                             params);
2173         vport_update_params.vport_id = 0;
2174         /* Use the current value of rss_enable */
2175         params->rss_enable = qdev->rss_enable;
2176         vport_update_params.rss_params = params;
2177
2178         for_each_hwfn(edev, i) {
2179                 p_hwfn = &edev->hwfns[i];
2180                 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2181                 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
2182                                            ECORE_SPQ_MODE_EBLOCK, NULL);
2183                 if (rc) {
2184                         DP_ERR(edev, "vport-update for RSS failed\n");
2185                         goto out;
2186                 }
2187         }
2188
2189 out:
2190         rte_free(params);
2191         return rc;
2192 }
2193
2194 static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
2195                                struct rte_eth_rss_reta_entry64 *reta_conf,
2196                                uint16_t reta_size)
2197 {
2198         struct qede_dev *qdev = eth_dev->data->dev_private;
2199         struct ecore_dev *edev = &qdev->edev;
2200         uint16_t i, idx, shift;
2201         uint8_t entry;
2202
2203         if (reta_size > ETH_RSS_RETA_SIZE_128) {
2204                 DP_ERR(edev, "reta_size %d is not supported\n",
2205                        reta_size);
2206                 return -EINVAL;
2207         }
2208
2209         for (i = 0; i < reta_size; i++) {
2210                 idx = i / RTE_RETA_GROUP_SIZE;
2211                 shift = i % RTE_RETA_GROUP_SIZE;
2212                 if (reta_conf[idx].mask & (1ULL << shift)) {
2213                         entry = qdev->rss_ind_table[i];
2214                         reta_conf[idx].reta[shift] = entry;
2215                 }
2216         }
2217
2218         return 0;
2219 }
2220
2221
2222
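/* Change the port MTU. The port is temporarily stopped (with dummy Rx/Tx
 * burst handlers installed while it is reconfigured), the per-queue Rx
 * buffer sizes are recomputed for the new frame size, and the port is
 * restarted if it was running.
 */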
2223 static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
2224 {
2225         struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
2226         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
2227         struct rte_eth_dev_info dev_info = {0};
2228         struct qede_fastpath *fp;
2229         uint32_t max_rx_pkt_len;
2230         uint32_t frame_size;
2231         uint16_t bufsz;
2232         bool restart = false;
2233         int i, rc;
2234
2235         PMD_INIT_FUNC_TRACE(edev);
2236         qede_dev_info_get(dev, &dev_info);
2237         max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
2238         frame_size = max_rx_pkt_len;
2239         if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
2240                 DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
2241                        mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
2242                        QEDE_ETH_OVERHEAD);
2243                 return -EINVAL;
2244         }
2245         if (!dev->data->scattered_rx &&
2246             frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
2247                 DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
2248                         dev->data->min_rx_buf_size);
2249                 return -EINVAL;
2250         }
2251         /* Temporarily replace I/O functions with dummy ones. It cannot
2252          * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
2253          */
2254         dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
2255         dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
2256         if (dev->data->dev_started) {
2257                 dev->data->dev_started = 0;
2258                 qede_dev_stop(dev);
2259                 restart = true;
2260         }
2261         rte_delay_ms(1000);
2262         qdev->mtu = mtu;
2263
2264         /* Fix up RX buf size for all queues of the port */
2265         for_each_rss(i) {
2266                 fp = &qdev->fp_array[i];
2267                 if (fp->rxq != NULL) {
2268                         bufsz = (uint16_t)rte_pktmbuf_data_room_size(
2269                                 fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
2270                         /* cache align the mbuf size to simplify rx_buf_size
2271                          * calculation
2272                          */
2273                         bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
2274                         rc = qede_calc_rx_buf_size(dev, bufsz, frame_size);
2275                         if (rc < 0)
2276                                 return rc;
2277
2278                         fp->rxq->rx_buf_size = rc;
2279                 }
2280         }
2281         if (max_rx_pkt_len > ETHER_MAX_LEN)
2282                 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
2283         else
2284                 dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
2285
2286         if (!dev->data->dev_started && restart) {
2287                 qede_dev_start(dev);
2288                 dev->data->dev_started = 1;
2289         }
2290
2291         /* update max frame size */
2292         dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
2293         /* Reinstate the real Rx/Tx burst handlers */
2294         dev->rx_pkt_burst = qede_recv_pkts;
2295         dev->tx_pkt_burst = qede_xmit_pkts;
2296
2297         return 0;
2298 }
2299
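/* .dev_reset handler: recover the port by running the ethdev uninit and
 * init sequence back to back, so the application can reset the device
 * without going through a remove/probe cycle.
 *
 * Illustrative usage from an application (not part of this driver): the
 * path is normally exercised through the generic ethdev API, e.g.
 *
 *	if (rte_eth_dev_reset(port_id) == 0)
 *		rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *
 * followed by queue setup and rte_eth_dev_start(), typically after an
 * RTE_ETH_EVENT_INTR_RESET event.
 */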
2300 static int
2301 qede_dev_reset(struct rte_eth_dev *dev)
2302 {
2303         int ret;
2304
2305         ret = qede_eth_dev_uninit(dev);
2306         if (ret)
2307                 return ret;
2308
2309         return qede_eth_dev_init(dev);
2310 }
2311
2312 static const struct eth_dev_ops qede_eth_dev_ops = {
2313         .dev_configure = qede_dev_configure,
2314         .dev_infos_get = qede_dev_info_get,
2315         .rx_queue_setup = qede_rx_queue_setup,
2316         .rx_queue_release = qede_rx_queue_release,
2317         .rx_descriptor_status = qede_rx_descriptor_status,
2318         .tx_queue_setup = qede_tx_queue_setup,
2319         .tx_queue_release = qede_tx_queue_release,
2320         .dev_start = qede_dev_start,
2321         .dev_reset = qede_dev_reset,
2322         .dev_set_link_up = qede_dev_set_link_up,
2323         .dev_set_link_down = qede_dev_set_link_down,
2324         .link_update = qede_link_update,
2325         .promiscuous_enable = qede_promiscuous_enable,
2326         .promiscuous_disable = qede_promiscuous_disable,
2327         .allmulticast_enable = qede_allmulticast_enable,
2328         .allmulticast_disable = qede_allmulticast_disable,
2329         .set_mc_addr_list = qede_set_mc_addr_list,
2330         .dev_stop = qede_dev_stop,
2331         .dev_close = qede_dev_close,
2332         .stats_get = qede_get_stats,
2333         .stats_reset = qede_reset_stats,
2334         .xstats_get = qede_get_xstats,
2335         .xstats_reset = qede_reset_xstats,
2336         .xstats_get_names = qede_get_xstats_names,
2337         .mac_addr_add = qede_mac_addr_add,
2338         .mac_addr_remove = qede_mac_addr_remove,
2339         .mac_addr_set = qede_mac_addr_set,
2340         .vlan_offload_set = qede_vlan_offload_set,
2341         .vlan_filter_set = qede_vlan_filter_set,
2342         .flow_ctrl_set = qede_flow_ctrl_set,
2343         .flow_ctrl_get = qede_flow_ctrl_get,
2344         .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
2345         .rss_hash_update = qede_rss_hash_update,
2346         .rss_hash_conf_get = qede_rss_hash_conf_get,
2347         .reta_update  = qede_rss_reta_update,
2348         .reta_query  = qede_rss_reta_query,
2349         .mtu_set = qede_set_mtu,
2350         .filter_ctrl = qede_dev_filter_ctrl,
2351         .udp_tunnel_port_add = qede_udp_dst_port_add,
2352         .udp_tunnel_port_del = qede_udp_dst_port_del,
2353 };
2354
2355 static const struct eth_dev_ops qede_eth_vf_dev_ops = {
2356         .dev_configure = qede_dev_configure,
2357         .dev_infos_get = qede_dev_info_get,
2358         .rx_queue_setup = qede_rx_queue_setup,
2359         .rx_queue_release = qede_rx_queue_release,
2360         .rx_descriptor_status = qede_rx_descriptor_status,
2361         .tx_queue_setup = qede_tx_queue_setup,
2362         .tx_queue_release = qede_tx_queue_release,
2363         .dev_start = qede_dev_start,
2364         .dev_reset = qede_dev_reset,
2365         .dev_set_link_up = qede_dev_set_link_up,
2366         .dev_set_link_down = qede_dev_set_link_down,
2367         .link_update = qede_link_update,
2368         .promiscuous_enable = qede_promiscuous_enable,
2369         .promiscuous_disable = qede_promiscuous_disable,
2370         .allmulticast_enable = qede_allmulticast_enable,
2371         .allmulticast_disable = qede_allmulticast_disable,
2372         .set_mc_addr_list = qede_set_mc_addr_list,
2373         .dev_stop = qede_dev_stop,
2374         .dev_close = qede_dev_close,
2375         .stats_get = qede_get_stats,
2376         .stats_reset = qede_reset_stats,
2377         .xstats_get = qede_get_xstats,
2378         .xstats_reset = qede_reset_xstats,
2379         .xstats_get_names = qede_get_xstats_names,
2380         .vlan_offload_set = qede_vlan_offload_set,
2381         .vlan_filter_set = qede_vlan_filter_set,
2382         .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
2383         .rss_hash_update = qede_rss_hash_update,
2384         .rss_hash_conf_get = qede_rss_hash_conf_get,
2385         .reta_update  = qede_rss_reta_update,
2386         .reta_query  = qede_rss_reta_query,
2387         .mtu_set = qede_set_mtu,
2388         .udp_tunnel_port_add = qede_udp_dst_port_add,
2389         .udp_tunnel_port_del = qede_udp_dst_port_del,
2390         .mac_addr_add = qede_mac_addr_add,
2391         .mac_addr_remove = qede_mac_addr_remove,
2392         .mac_addr_set = qede_mac_addr_set,
2393 };
2394
2395 static void qede_update_pf_params(struct ecore_dev *edev)
2396 {
2397         struct ecore_pf_params pf_params;
2398
2399         memset(&pf_params, 0, sizeof(struct ecore_pf_params));
2400         pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
2401         pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
2402         qed_ops->common->update_pf_params(edev, &pf_params);
2403 }
2404
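/* Common PF/VF ethdev init: probe the device through the qed ops, register
 * the interrupt handler, start the slowpath, allocate the MAC address table
 * and set up the default tunnel and filter state.
 */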
2405 static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
2406 {
2407         struct rte_pci_device *pci_dev;
2408         struct rte_pci_addr pci_addr;
2409         struct qede_dev *adapter;
2410         struct ecore_dev *edev;
2411         struct qed_dev_eth_info dev_info;
2412         struct qed_slowpath_params params;
2413         static bool do_once = true;
2414         uint8_t bulletin_change;
2415         uint8_t vf_mac[ETHER_ADDR_LEN];
2416         uint8_t is_mac_forced;
2417         bool is_mac_exist;
2418         /* Fix up ecore debug level */
2419         uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
2420         uint8_t dp_level = ECORE_LEVEL_VERBOSE;
2421         uint32_t int_mode;
2422         int rc;
2423
2424         /* Extract key data structures */
2425         adapter = eth_dev->data->dev_private;
2426         adapter->ethdev = eth_dev;
2427         edev = &adapter->edev;
2428         pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2429         pci_addr = pci_dev->addr;
2430
2431         PMD_INIT_FUNC_TRACE(edev);
2432
2433         snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
2434                  pci_addr.bus, pci_addr.devid, pci_addr.function,
2435                  eth_dev->data->port_id);
2436
2437         eth_dev->rx_pkt_burst = qede_recv_pkts;
2438         eth_dev->tx_pkt_burst = qede_xmit_pkts;
2439         eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
2440
2441         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2442                 DP_ERR(edev, "Skipping device init from secondary process\n");
2443                 return 0;
2444         }
2445
2446         rte_eth_copy_pci_info(eth_dev, pci_dev);
2447
2448         /* @DPDK */
2449         edev->vendor_id = pci_dev->id.vendor_id;
2450         edev->device_id = pci_dev->id.device_id;
2451
2452         qed_ops = qed_get_eth_ops();
2453         if (!qed_ops) {
2454                 DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
2455                 return -EINVAL;
2456         }
2457
2458         DP_INFO(edev, "Starting qede probe\n");
2459         rc = qed_ops->common->probe(edev, pci_dev, dp_module,
2460                                     dp_level, is_vf);
2461         if (rc != 0) {
2462                 DP_ERR(edev, "qede probe failed rc %d\n", rc);
2463                 return -ENODEV;
2464         }
2465         qede_update_pf_params(edev);
2466
2467         switch (pci_dev->intr_handle.type) {
2468         case RTE_INTR_HANDLE_UIO_INTX:
2469         case RTE_INTR_HANDLE_VFIO_LEGACY:
2470                 int_mode = ECORE_INT_MODE_INTA;
2471                 rte_intr_callback_register(&pci_dev->intr_handle,
2472                                            qede_interrupt_handler_intx,
2473                                            (void *)eth_dev);
2474                 break;
2475         default:
2476                 int_mode = ECORE_INT_MODE_MSIX;
2477                 rte_intr_callback_register(&pci_dev->intr_handle,
2478                                            qede_interrupt_handler,
2479                                            (void *)eth_dev);
2480         }
2481
2482         if (rte_intr_enable(&pci_dev->intr_handle)) {
2483                 DP_ERR(edev, "rte_intr_enable() failed\n");
2484                 return -ENODEV;
2485         }
2486
2487         /* Start the Slowpath-process */
2488         memset(&params, 0, sizeof(struct qed_slowpath_params));
2489
2490         params.int_mode = int_mode;
2491         params.drv_major = QEDE_PMD_VERSION_MAJOR;
2492         params.drv_minor = QEDE_PMD_VERSION_MINOR;
2493         params.drv_rev = QEDE_PMD_VERSION_REVISION;
2494         params.drv_eng = QEDE_PMD_VERSION_PATCH;
2495         strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
2496                 QEDE_PMD_DRV_VER_STR_SIZE);
2497
2498         /* For a CMT mode device, do periodic polling for slowpath events.
2499          * This is required since the uio device uses only one MSI-X
2500          * interrupt vector but we need one for each engine.
2501          */
2502         if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
2503                 rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
2504                                        qede_poll_sp_sb_cb,
2505                                        (void *)eth_dev);
2506                 if (rc != 0) {
2507                         DP_ERR(edev, "Unable to start periodic"
2508                                      " timer rc %d\n", rc);
2509                         return -EINVAL;
2510                 }
2511         }
2512
2513         rc = qed_ops->common->slowpath_start(edev, &params);
2514         if (rc) {
2515                 DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
2516                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
2517                                      (void *)eth_dev);
2518                 return -ENODEV;
2519         }
2520
2521         rc = qed_ops->fill_dev_info(edev, &dev_info);
2522         if (rc) {
2523                 DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
2524                 qed_ops->common->slowpath_stop(edev);
2525                 qed_ops->common->remove(edev);
2526                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
2527                                      (void *)eth_dev);
2528                 return -ENODEV;
2529         }
2530
2531         qede_alloc_etherdev(adapter, &dev_info);
2532
2533         adapter->ops->common->set_name(edev, edev->name);
2534
2535         if (!is_vf)
2536                 adapter->dev_info.num_mac_filters =
2537                         (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
2538                                             ECORE_MAC);
2539         else
2540                 ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
2541                                 (uint32_t *)&adapter->dev_info.num_mac_filters);
2542
2543         /* Allocate memory for storing MAC addr */
2544         eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
2545                                         (ETHER_ADDR_LEN *
2546                                         adapter->dev_info.num_mac_filters),
2547                                         RTE_CACHE_LINE_SIZE);
2548
2549         if (eth_dev->data->mac_addrs == NULL) {
2550                 DP_ERR(edev, "Failed to allocate MAC address\n");
2551                 qed_ops->common->slowpath_stop(edev);
2552                 qed_ops->common->remove(edev);
2553                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
2554                                      (void *)eth_dev);
2555                 return -ENOMEM;
2556         }
2557
2558         if (!is_vf) {
2559                 ether_addr_copy((struct ether_addr *)edev->hwfns[0].
2560                                 hw_info.hw_mac_addr,
2561                                 &eth_dev->data->mac_addrs[0]);
2562                 ether_addr_copy(&eth_dev->data->mac_addrs[0],
2563                                 &adapter->primary_mac);
2564         } else {
2565                 ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
2566                                        &bulletin_change);
2567                 if (bulletin_change) {
2568                         is_mac_exist =
2569                             ecore_vf_bulletin_get_forced_mac(
2570                                                 ECORE_LEADING_HWFN(edev),
2571                                                 vf_mac,
2572                                                 &is_mac_forced);
2573                         if (is_mac_exist) {
2574                                 DP_INFO(edev, "VF macaddr received from PF\n");
2575                                 ether_addr_copy((struct ether_addr *)&vf_mac,
2576                                                 &eth_dev->data->mac_addrs[0]);
2577                                 ether_addr_copy(&eth_dev->data->mac_addrs[0],
2578                                                 &adapter->primary_mac);
2579                         } else {
2580                                 DP_ERR(edev, "No VF macaddr assigned\n");
2581                         }
2582                 }
2583         }
2584
2585         eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
2586
2587         if (do_once) {
2588                 qede_print_adapter_info(adapter);
2589                 do_once = false;
2590         }
2591
2592         /* Bring-up the link */
2593         qede_dev_set_link_state(eth_dev, true);
2594
2595         adapter->num_tx_queues = 0;
2596         adapter->num_rx_queues = 0;
2597         SLIST_INIT(&adapter->arfs_info.arfs_list_head);
2598         SLIST_INIT(&adapter->vlan_list_head);
2599         SLIST_INIT(&adapter->uc_list_head);
2600         SLIST_INIT(&adapter->mc_list_head);
2601         adapter->mtu = ETHER_MTU;
2602         adapter->vport_started = false;
2603
2604         /* VF tunnel offloads are enabled by default in the PF driver */
2605         adapter->vxlan.num_filters = 0;
2606         adapter->geneve.num_filters = 0;
2607         adapter->ipgre.num_filters = 0;
2608         if (is_vf) {
2609                 adapter->vxlan.enable = true;
2610                 adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
2611                                              ETH_TUNNEL_FILTER_IVLAN;
2612                 adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
2613                 adapter->geneve.enable = true;
2614                 adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
2615                                               ETH_TUNNEL_FILTER_IVLAN;
2616                 adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
2617                 adapter->ipgre.enable = true;
2618                 adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
2619                                              ETH_TUNNEL_FILTER_IVLAN;
2620         } else {
2621                 adapter->vxlan.enable = false;
2622                 adapter->geneve.enable = false;
2623                 adapter->ipgre.enable = false;
2624         }
2625
2626         DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
2627                 adapter->primary_mac.addr_bytes[0],
2628                 adapter->primary_mac.addr_bytes[1],
2629                 adapter->primary_mac.addr_bytes[2],
2630                 adapter->primary_mac.addr_bytes[3],
2631                 adapter->primary_mac.addr_bytes[4],
2632                 adapter->primary_mac.addr_bytes[5]);
2633
2634         DP_INFO(edev, "Device initialized\n");
2635
2636         return 0;
2637 }
2638
2639 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
2640 {
2641         return qede_common_dev_init(eth_dev, 1);
2642 }
2643
2644 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
2645 {
2646         return qede_common_dev_init(eth_dev, 0);
2647 }
2648
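/* Common teardown for PF and VF: close the device and release the MAC
 * address array that was allocated during init.
 */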
2649 static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
2650 {
2651 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
2652         struct qede_dev *qdev = eth_dev->data->dev_private;
2653         struct ecore_dev *edev = &qdev->edev;
2654
2655         PMD_INIT_FUNC_TRACE(edev);
2656 #endif
2657
2658         /* only uninitialize in the primary process */
2659         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2660                 return 0;
2661
2662         /* safe to close dev here */
2663         qede_dev_close(eth_dev);
2664
2665         eth_dev->dev_ops = NULL;
2666         eth_dev->rx_pkt_burst = NULL;
2667         eth_dev->tx_pkt_burst = NULL;
2668
2669         /* rte_free() accepts a NULL pointer, so free unconditionally */
2670         rte_free(eth_dev->data->mac_addrs);
2671
2672         eth_dev->data->mac_addrs = NULL;
2673
2674         return 0;
2675 }
2676
2677 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
2678 {
2679         return qede_dev_common_uninit(eth_dev);
2680 }
2681
2682 static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
2683 {
2684         return qede_dev_common_uninit(eth_dev);
2685 }
2686
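/*
 * PCI device ID tables, one for SR-IOV VFs and one for PFs. Each table is
 * terminated by an all-zero entry so the PCI bus driver knows where the
 * list ends.
 */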
2687 static const struct rte_pci_id pci_id_qedevf_map[] = {
2688 #define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
2689         {
2690                 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
2691         },
2692         {
2693                 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
2694         },
2695         {
2696                 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
2697         },
2698         {.vendor_id = 0,}
2699 };
2700
2701 static const struct rte_pci_id pci_id_qede_map[] = {
2702 #define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
2703         {
2704                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
2705         },
2706         {
2707                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
2708         },
2709         {
2710                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
2711         },
2712         {
2713                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
2714         },
2715         {
2716                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
2717         },
2718         {
2719                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
2720         },
2721         {
2722                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
2723         },
2724         {
2725                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
2726         },
2727         {
2728                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
2729         },
2730         {
2731                 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
2732         },
2733         {.vendor_id = 0,}
2734 };
2735
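/*
 * Probe/remove glue: rte_eth_dev_pci_generic_probe() allocates an ethdev
 * with a private data area of sizeof(struct qede_dev) and then calls the
 * given init routine; rte_eth_dev_pci_generic_remove() mirrors this with
 * the uninit routine.
 */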
2736 static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2737         struct rte_pci_device *pci_dev)
2738 {
2739         return rte_eth_dev_pci_generic_probe(pci_dev,
2740                 sizeof(struct qede_dev), qedevf_eth_dev_init);
2741 }
2742
2743 static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
2744 {
2745         return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
2746 }
2747
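/*
 * RTE_PCI_DRV_NEED_MAPPING asks the PCI bus to map the device BARs through
 * the bound UIO/VFIO driver before probe; RTE_PCI_DRV_INTR_LSC advertises
 * support for link status change interrupts.
 */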
2748 static struct rte_pci_driver rte_qedevf_pmd = {
2749         .id_table = pci_id_qedevf_map,
2750         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2751         .probe = qedevf_eth_dev_pci_probe,
2752         .remove = qedevf_eth_dev_pci_remove,
2753 };
2754
2755 static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2756         struct rte_pci_device *pci_dev)
2757 {
2758         return rte_eth_dev_pci_generic_probe(pci_dev,
2759                 sizeof(struct qede_dev), qede_eth_dev_init);
2760 }
2761
2762 static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
2763 {
2764         return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
2765 }
2766
2767 static struct rte_pci_driver rte_qede_pmd = {
2768         .id_table = pci_id_qede_map,
2769         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2770         .probe = qede_eth_dev_pci_probe,
2771         .remove = qede_eth_dev_pci_remove,
2772 };
2773
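/*
 * Register both PMDs with the PCI bus, export their device ID tables for
 * tools such as dpdk-pmdinfo, and declare the kernel modules (uio/vfio
 * variants) that can be used to bind the devices.
 */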
2774 RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
2775 RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
2776 RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
2777 RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
2778 RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
2779 RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
2780
2781 RTE_INIT(qede_init_log)
2782 {
2783         qede_logtype_init = rte_log_register("pmd.net.qede.init");
2784         if (qede_logtype_init >= 0)
2785                 rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE);
2786         qede_logtype_driver = rte_log_register("pmd.net.qede.driver");
2787         if (qede_logtype_driver >= 0)
2788                 rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE);
2789 }
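
/*
 * Both log types default to NOTICE. More verbose output can typically be
 * requested per component from the EAL command line, e.g. (sketch; the
 * exact option syntax depends on the DPDK version in use):
 *
 *      --log-level=pmd.net.qede.driver:debug
 */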