net/qede: fix driver version string
drivers/net/qede/qede_ethdev.c
/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "qede_ethdev.h"
#include <rte_alarm.h>
#include <rte_version.h>

/* Globals */
static const struct qed_eth_ops *qed_ops;
static const char *drivername = "qede pmd";
static int64_t timer_period = 1;

struct rte_qede_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        uint64_t offset;
};

static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
        {"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)},
        {"rx_multicast_bytes",
                offsetof(struct ecore_eth_stats, rx_mcast_bytes)},
        {"rx_broadcast_bytes",
                offsetof(struct ecore_eth_stats, rx_bcast_bytes)},
        {"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)},
        {"rx_multicast_packets",
                offsetof(struct ecore_eth_stats, rx_mcast_pkts)},
        {"rx_broadcast_packets",
                offsetof(struct ecore_eth_stats, rx_bcast_pkts)},

        {"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)},
        {"tx_multicast_bytes",
                offsetof(struct ecore_eth_stats, tx_mcast_bytes)},
        {"tx_broadcast_bytes",
                offsetof(struct ecore_eth_stats, tx_bcast_bytes)},
        {"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)},
        {"tx_multicast_packets",
                offsetof(struct ecore_eth_stats, tx_mcast_pkts)},
        {"tx_broadcast_packets",
                offsetof(struct ecore_eth_stats, tx_bcast_pkts)},

        {"rx_64_byte_packets",
                offsetof(struct ecore_eth_stats, rx_64_byte_packets)},
        {"rx_65_to_127_byte_packets",
                offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)},
        {"rx_128_to_255_byte_packets",
                offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)},
        {"rx_256_to_511_byte_packets",
                offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)},
        {"rx_512_to_1023_byte_packets",
                offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)},
        {"rx_1024_to_1518_byte_packets",
                offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)},
        {"rx_1519_to_1522_byte_packets",
                offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)},
        {"rx_1519_to_2047_byte_packets",
                offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)},
        {"rx_2048_to_4095_byte_packets",
                offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)},
        {"rx_4096_to_9216_byte_packets",
                offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)},
        {"rx_9217_to_16383_byte_packets",
                offsetof(struct ecore_eth_stats,
                         rx_9217_to_16383_byte_packets)},
        {"tx_64_byte_packets",
                offsetof(struct ecore_eth_stats, tx_64_byte_packets)},
        {"tx_65_to_127_byte_packets",
                offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)},
        {"tx_128_to_255_byte_packets",
                offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)},
        {"tx_256_to_511_byte_packets",
                offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)},
        {"tx_512_to_1023_byte_packets",
                offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)},
        {"tx_1024_to_1518_byte_packets",
                offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)},
        {"tx_1519_to_2047_byte_packets",
                offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)},
        {"tx_2048_to_4095_byte_packets",
                offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)},
        {"tx_4096_to_9216_byte_packets",
                offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)},
        {"tx_9217_to_16383_byte_packets",
                offsetof(struct ecore_eth_stats,
                         tx_9217_to_16383_byte_packets)},

        /* Note: the ecore struct field keeps the original "crtl" spelling */
        {"rx_mac_ctrl_frames",
                offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)},
        {"tx_mac_control_frames",
                offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)},
        {"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)},
        {"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)},
        {"rx_priority_flow_control_frames",
                offsetof(struct ecore_eth_stats, rx_pfc_frames)},
        {"tx_priority_flow_control_frames",
                offsetof(struct ecore_eth_stats, tx_pfc_frames)},

        {"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)},
        {"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)},
        {"rx_carrier_errors",
                offsetof(struct ecore_eth_stats, rx_carrier_errors)},
        {"rx_oversize_packet_errors",
                offsetof(struct ecore_eth_stats, rx_oversize_packets)},
        {"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)},
        {"rx_undersize_packet_errors",
                offsetof(struct ecore_eth_stats, rx_undersize_packets)},
        {"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)},
        {"rx_host_buffer_not_available",
                offsetof(struct ecore_eth_stats, no_buff_discards)},
        /* Number of packets discarded because they are bigger than MTU */
        {"rx_packet_too_big_discards",
                offsetof(struct ecore_eth_stats, packet_too_big_discard)},
        {"rx_ttl_zero_discards",
                offsetof(struct ecore_eth_stats, ttl0_discard)},
        {"rx_multi_function_tag_filter_discards",
                offsetof(struct ecore_eth_stats, mftag_filter_discards)},
        {"rx_mac_filter_discards",
                offsetof(struct ecore_eth_stats, mac_filter_discards)},
        {"rx_hw_buffer_truncates",
                offsetof(struct ecore_eth_stats, brb_truncates)},
        {"rx_hw_buffer_discards",
                offsetof(struct ecore_eth_stats, brb_discards)},
        {"tx_lpi_entry_count",
                offsetof(struct ecore_eth_stats, tx_lpi_entry_count)},
        {"tx_total_collisions",
                offsetof(struct ecore_eth_stats, tx_total_collisions)},
        {"tx_error_drop_packets",
                offsetof(struct ecore_eth_stats, tx_err_drop_pkts)},

        {"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)},
        {"rx_mac_unicast_packets",
                offsetof(struct ecore_eth_stats, rx_mac_uc_packets)},
        {"rx_mac_multicast_packets",
                offsetof(struct ecore_eth_stats, rx_mac_mc_packets)},
        {"rx_mac_broadcast_packets",
                offsetof(struct ecore_eth_stats, rx_mac_bc_packets)},
        {"rx_mac_frames_ok",
                offsetof(struct ecore_eth_stats, rx_mac_frames_ok)},
        {"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)},
        {"tx_mac_unicast_packets",
                offsetof(struct ecore_eth_stats, tx_mac_uc_packets)},
        {"tx_mac_multicast_packets",
                offsetof(struct ecore_eth_stats, tx_mac_mc_packets)},
        {"tx_mac_broadcast_packets",
                offsetof(struct ecore_eth_stats, tx_mac_bc_packets)},

        {"lro_coalesced_packets",
                offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)},
        {"lro_coalesced_events",
                offsetof(struct ecore_eth_stats, tpa_coalesced_events)},
        {"lro_aborts_num",
                offsetof(struct ecore_eth_stats, tpa_aborts_num)},
        {"lro_not_coalesced_packets",
                offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)},
        {"lro_coalesced_bytes",
                offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
};

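/* Slowpath interrupt handling: the PCI interrupt callback dispatches the
 * event to ecore's slowpath DPC and then re-arms the interrupt line.
 */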
static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
        ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
}

static void
qede_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param)
{
        struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        qede_interrupt_action(ECORE_LEADING_HWFN(edev));
        if (rte_intr_enable(&eth_dev->pci_dev->intr_handle))
                DP_ERR(edev, "rte_intr_enable failed\n");
}

static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
        rte_memcpy(&qdev->dev_info, info, sizeof(*info));
        qdev->num_tc = qdev->dev_info.num_tc;
        qdev->ops = qed_ops;
}

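/* Log adapter details at init time: chip revision plus DPDK, driver,
 * firmware and management-firmware version strings.
 */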
static void qede_print_adapter_info(struct qede_dev *qdev)
{
        struct ecore_dev *edev = &qdev->edev;
        struct qed_dev_info *info = &qdev->dev_info.common;
        static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
        static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];

        DP_INFO(edev, "*********************************\n");
        DP_INFO(edev, " DPDK version:%s\n", rte_version());
        DP_INFO(edev, " Chip details : %s%d\n",
                  ECORE_IS_BB(edev) ? "BB" : "AH",
                  CHIP_REV_IS_A0(edev) ? 0 : 1);
        snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
                 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
        snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
                 ver_str, QEDE_PMD_VERSION);
        DP_INFO(edev, " Driver version : %s\n", drv_ver);
        DP_INFO(edev, " Firmware version : %s\n", ver_str);

        snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
                 "%d.%d.%d.%d",
                (info->mfw_rev >> 24) & 0xff,
                (info->mfw_rev >> 16) & 0xff,
                (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
        DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
        DP_INFO(edev, " Firmware file : %s\n", fw_file);
        DP_INFO(edev, "*********************************\n");
}

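/* Issue a unicast MAC filter add/delete command through the qed filter
 * interface; opcode selects QED_FILTER_XCAST_TYPE_ADD or _DEL.
 */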
static int
qede_set_ucast_rx_mac(struct qede_dev *qdev,
                      enum qed_filter_xcast_params_type opcode,
                      uint8_t mac[ETHER_ADDR_LEN])
{
        struct ecore_dev *edev = &qdev->edev;
        struct qed_filter_params filter_cmd;

        memset(&filter_cmd, 0, sizeof(filter_cmd));
        filter_cmd.type = QED_FILTER_TYPE_UCAST;
        filter_cmd.filter.ucast.type = opcode;
        filter_cmd.filter.ucast.mac_valid = 1;
        rte_memcpy(&filter_cmd.filter.ucast.mac[0], &mac[0], ETHER_ADDR_LEN);
        return qdev->ops->filter_config(edev, &filter_cmd);
}

static void
qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
                  uint32_t index, __rte_unused uint32_t pool)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        int rc;

        PMD_INIT_FUNC_TRACE(edev);

        if (index >= qdev->dev_info.num_mac_addrs) {
                DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
                       index, qdev->dev_info.num_mac_addrs);
                return;
        }

        /* Add the MAC address even though promiscuous mode is set */
        if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
                DP_INFO(edev, "Port is in promisc mode, adding MAC filter anyway\n");

        /* Add MAC filters according to the unicast secondary macs */
        rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
                                   mac_addr->addr_bytes);
        if (rc)
                DP_ERR(edev, "Unable to add macaddr rc=%d\n", rc);
}

static void
qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct ether_addr mac_addr;
        int rc;

        PMD_INIT_FUNC_TRACE(edev);

        if (index >= qdev->dev_info.num_mac_addrs) {
                DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
                       index, qdev->dev_info.num_mac_addrs);
                return;
        }

        /* Use the index maintained by rte */
        ether_addr_copy(&eth_dev->data->mac_addrs[index], &mac_addr);
        rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
                                   mac_addr.addr_bytes);
        if (rc)
                DP_ERR(edev, "Unable to remove macaddr rc=%d\n", rc);
}

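/* Replace the primary MAC: remove the current default filter first, then
 * add the new one; on failure the previous default MAC is restored in
 * eth_dev->data->mac_addrs[0].
 */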
static void
qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        int rc;

        if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
                                               mac_addr->addr_bytes)) {
                DP_ERR(edev, "Setting MAC address is not allowed\n");
                ether_addr_copy(&qdev->primary_mac,
                                &eth_dev->data->mac_addrs[0]);
                return;
        }

        /* First remove the primary mac */
        rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
                                   qdev->primary_mac.addr_bytes);

        if (rc) {
                DP_ERR(edev, "Unable to remove current macaddr."
                             " Reverting to previous default mac\n");
                ether_addr_copy(&qdev->primary_mac,
                                &eth_dev->data->mac_addrs[0]);
                return;
        }

        /* Add new MAC */
        rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
                                   mac_addr->addr_bytes);

        if (rc)
                DP_ERR(edev, "Unable to add new default mac\n");
        else
                ether_addr_copy(mac_addr, &qdev->primary_mac);
}
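/* Toggle accept-any-vlan on vport 0 via a vport-update ramrod; no-op if
 * the requested state is already in effect.
 */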
static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
{
        struct ecore_dev *edev = &qdev->edev;
        struct qed_update_vport_params params = {
                .vport_id = 0,
                .accept_any_vlan = action,
                .update_accept_any_vlan_flg = 1,
        };
        int rc;

        /* Proceed only if action actually needs to be performed */
        if (qdev->accept_any_vlan == action)
                return;

        rc = qdev->ops->vport_update(edev, &params);
        if (rc) {
                DP_ERR(edev, "Failed to %s accept-any-vlan\n",
                       action ? "enable" : "disable");
        } else {
                DP_INFO(edev, "%s accept-any-vlan\n",
                        action ? "enabled" : "disabled");
                qdev->accept_any_vlan = action;
        }
}

static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
{
        struct qed_update_vport_params vport_update_params;
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        int rc;

        memset(&vport_update_params, 0, sizeof(vport_update_params));
        vport_update_params.vport_id = 0;
        vport_update_params.update_inner_vlan_removal_flg = 1;
        vport_update_params.inner_vlan_removal_flg = set_stripping;
        rc = qdev->ops->vport_update(edev, &vport_update_params);
        if (rc) {
                DP_ERR(edev, "Update V-PORT failed %d\n", rc);
                return rc;
        }

        return 0;
}

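/* Apply VLAN offload changes requested through the rxmode flags: inner
 * VLAN stripping and VLAN filtering. Q-in-Q (extend) offload is not
 * supported and only logged.
 */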
static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;

        if (mask & ETH_VLAN_STRIP_MASK) {
                if (rxmode->hw_vlan_strip)
                        (void)qede_vlan_stripping(eth_dev, 1);
                else
                        (void)qede_vlan_stripping(eth_dev, 0);
        }

        if (mask & ETH_VLAN_FILTER_MASK) {
                /* VLAN filtering kicks in when a VLAN is added */
                if (rxmode->hw_vlan_filter) {
                        qede_vlan_filter_set(eth_dev, 0, 1);
                } else {
                        if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
                                DP_NOTICE(edev, false,
                                  " Please remove existing VLAN filters"
                                  " before disabling VLAN filtering\n");
                                /* Signal app that VLAN filtering is still
                                 * enabled
                                 */
                                rxmode->hw_vlan_filter = true;
                        } else {
                                qede_vlan_filter_set(eth_dev, 0, 0);
                        }
                }
        }

        if (mask & ETH_VLAN_EXTEND_MASK)
                DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
                        " and classification is based on outer tag only\n");

        DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
                mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
}

static int qede_set_ucast_rx_vlan(struct qede_dev *qdev,
                                  enum qed_filter_xcast_params_type opcode,
                                  uint16_t vid)
{
        struct qed_filter_params filter_cmd;
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

        memset(&filter_cmd, 0, sizeof(filter_cmd));
        filter_cmd.type = QED_FILTER_TYPE_UCAST;
        filter_cmd.filter.ucast.type = opcode;
        filter_cmd.filter.ucast.vlan_valid = 1;
        filter_cmd.filter.ucast.vlan = vid;

        return qdev->ops->filter_config(edev, &filter_cmd);
}

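/* Add or remove a VLAN filter. Filters are tracked in a per-device list;
 * once the hardware filter limit is reached, accept-any-vlan is enabled
 * instead of failing the request.
 */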
static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
                                uint16_t vlan_id, int on)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qed_dev_eth_info *dev_info = &qdev->dev_info;
        struct qede_vlan_entry *tmp = NULL;
        struct qede_vlan_entry *vlan;
        int rc;

        if (on) {
                if (qdev->configured_vlans == dev_info->num_vlan_filters) {
                        DP_INFO(edev, "Reached max VLAN filter limit,"
                                      " enabling accept_any_vlan\n");
                        qede_config_accept_any_vlan(qdev, true);
                        return 0;
                }

                SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
                        if (tmp->vid == vlan_id) {
                                DP_ERR(edev, "VLAN %u already configured\n",
                                       vlan_id);
                                return -EEXIST;
                        }
                }

                vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
                                  RTE_CACHE_LINE_SIZE);

                if (!vlan) {
                        DP_ERR(edev, "Did not allocate memory for VLAN\n");
                        return -ENOMEM;
                }

                rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_ADD,
                                            vlan_id);
                if (rc) {
                        DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
                               rc);
                        rte_free(vlan);
                } else {
                        vlan->vid = vlan_id;
                        SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
                        qdev->configured_vlans++;
                        DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
                                vlan_id, qdev->configured_vlans);
                }
        } else {
                SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
                        if (tmp->vid == vlan_id)
                                break;
                }

                if (!tmp) {
                        if (qdev->configured_vlans == 0) {
                                DP_INFO(edev,
                                        "No VLAN filters configured yet\n");
                                return 0;
                        }

                        DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
                        return -EINVAL;
                }

                SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);

                rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_DEL,
                                            vlan_id);
                if (rc) {
                        DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
                               vlan_id, rc);
                } else {
                        qdev->configured_vlans--;
                        DP_INFO(edev, "VLAN %u removed, configured_vlans %u\n",
                                vlan_id, qdev->configured_vlans);
                }
        }

        return rc;
}

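/* Start vport 0 with default parameters (inner VLAN removal on, stats
 * cleared) so that subsequent port configuration can be applied early.
 */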
static int qede_init_vport(struct qede_dev *qdev)
{
        struct ecore_dev *edev = &qdev->edev;
        struct qed_start_vport_params start = {0};
        int rc;

        start.remove_inner_vlan = 1;
        start.gro_enable = 0;
        start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
        start.vport_id = 0;
        start.drop_ttl0 = false;
        start.clear_stats = 1;
        start.handle_ptp_pkts = 0;

        rc = qdev->ops->vport_start(edev, &start);
        if (rc) {
                DP_ERR(edev, "Start V-PORT failed %d\n", rc);
                return rc;
        }

        DP_INFO(edev,
                "Start vport ramrod passed, vport_id = %d, MTU = %u\n",
                start.vport_id, start.mtu);

        return 0;
}

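/* Ethdev configure hook: validates queue counts (100G/CMT devices need an
 * even number of at least 2 RX/TX queues), allocates fastpath resources
 * and (re)starts the vport with default settings.
 */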
static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
        int rc, i, j;

        PMD_INIT_FUNC_TRACE(edev);

        /* Check requirements for 100G mode */
        if (edev->num_hwfns > 1) {
                if (eth_dev->data->nb_rx_queues < 2 ||
                    eth_dev->data->nb_tx_queues < 2) {
                        DP_NOTICE(edev, false,
                                  "100G mode needs min. 2 RX/TX queues\n");
                        return -EINVAL;
                }

                if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
                    (eth_dev->data->nb_tx_queues % 2 != 0)) {
                        DP_NOTICE(edev, false,
                                  "100G mode needs even no. of RX/TX queues\n");
                        return -EINVAL;
                }
        }

        /* Sanity checks and throw warnings */
        if (rxmode->enable_scatter == 1)
                eth_dev->data->scattered_rx = 1;

        if (rxmode->enable_lro == 1) {
                DP_INFO(edev, "LRO is not supported\n");
                return -EINVAL;
        }

        if (!rxmode->hw_strip_crc)
                DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");

        if (!rxmode->hw_ip_checksum)
                DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
                              "in hw\n");

        /* Check for the port restart case */
        if (qdev->state != QEDE_DEV_INIT) {
                rc = qdev->ops->vport_stop(edev, 0);
                if (rc != 0)
                        return rc;
                qede_dealloc_fp_resc(eth_dev);
        }

        qdev->fp_num_tx = eth_dev->data->nb_tx_queues;
        qdev->fp_num_rx = eth_dev->data->nb_rx_queues;
        qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx;

        /* Fastpath status block should be initialized before sending
         * VPORT-START in the case of VF. Anyway, do it for both VF/PF.
         */
        rc = qede_alloc_fp_resc(qdev);
        if (rc != 0)
                return rc;

        /* Issue VPORT-START with default config values to allow
         * other port configurations early on.
         */
        rc = qede_init_vport(qdev);
        if (rc != 0)
                return rc;

        SLIST_INIT(&qdev->vlan_list_head);

        /* Add primary mac for PF */
        if (IS_PF(edev))
                qede_mac_addr_set(eth_dev, &qdev->primary_mac);

        /* Enable VLAN offloads by default */
        qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
                                       ETH_VLAN_FILTER_MASK |
                                       ETH_VLAN_EXTEND_MASK);

        qdev->state = QEDE_DEV_CONFIG;

        DP_INFO(edev, "Allocated RSS=%d TSS=%d (with CoS=%d)\n",
                (int)QEDE_RSS_COUNT(qdev), (int)QEDE_TSS_COUNT(qdev),
                qdev->num_tc);

        return 0;
}

/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
        .nb_max = NUM_RX_BDS_MAX,
        .nb_min = 128,
        .nb_align = 128 /* lowest common multiple */
};

static const struct rte_eth_desc_lim qede_tx_desc_lim = {
        .nb_max = NUM_TX_BDS_MAX,
        .nb_min = 256,
        .nb_align = 256
};

static void
qede_dev_info_get(struct rte_eth_dev *eth_dev,
                  struct rte_eth_dev_info *dev_info)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        PMD_INIT_FUNC_TRACE(edev);

        dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
                                              QEDE_ETH_OVERHEAD);
        dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
        dev_info->rx_desc_lim = qede_rx_desc_lim;
        dev_info->tx_desc_lim = qede_tx_desc_lim;
        dev_info->max_rx_queues = (uint16_t)QEDE_MAX_RSS_CNT(qdev);
        dev_info->max_tx_queues = dev_info->max_rx_queues;
        dev_info->max_mac_addrs = qdev->dev_info.num_mac_addrs;
        if (IS_VF(edev))
                dev_info->max_vfs = 0;
        else
                dev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev);
        dev_info->driver_name = qdev->drv_ver;
        dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
        dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .txq_flags = QEDE_TXQ_FLAGS,
        };

        dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
                                     DEV_RX_OFFLOAD_IPV4_CKSUM |
                                     DEV_RX_OFFLOAD_UDP_CKSUM |
                                     DEV_RX_OFFLOAD_TCP_CKSUM);
        dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
                                     DEV_TX_OFFLOAD_IPV4_CKSUM |
                                     DEV_TX_OFFLOAD_UDP_CKSUM |
                                     DEV_TX_OFFLOAD_TCP_CKSUM);

        dev_info->speed_capa = ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
                               ETH_LINK_SPEED_100G;
}

/* return 0 means link status changed, -1 means not changed */
static int
qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        uint16_t link_duplex;
        struct qed_link_output link;
        struct rte_eth_link *curr = &eth_dev->data->dev_link;

        memset(&link, 0, sizeof(struct qed_link_output));
        qdev->ops->common->get_link(edev, &link);

        /* Link Speed */
        curr->link_speed = link.speed;

        /* Link Mode */
        switch (link.duplex) {
        case QEDE_DUPLEX_HALF:
                link_duplex = ETH_LINK_HALF_DUPLEX;
                break;
        case QEDE_DUPLEX_FULL:
                link_duplex = ETH_LINK_FULL_DUPLEX;
                break;
        case QEDE_DUPLEX_UNKNOWN:
        default:
                link_duplex = -1;
        }
        curr->link_duplex = link_duplex;

        /* Link Status */
        curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;

        /* AN */
        curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
                             ETH_LINK_AUTONEG : ETH_LINK_FIXED;

        DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
                curr->link_speed, curr->link_duplex,
                curr->link_autoneg, curr->link_status);

        /* return 0 means link status changed, -1 means not changed */
        return ((curr->link_status == link.link_up) ? -1 : 0);
}

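/* Program the RX classification mode (regular/promisc/multi-promisc)
 * through the qed filter interface.
 */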
static void
qede_rx_mode_setting(struct rte_eth_dev *eth_dev,
                     enum qed_filter_rx_mode_type accept_flags)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct qed_filter_params rx_mode;

        DP_INFO(edev, "%s mode %u\n", __func__, accept_flags);

        memset(&rx_mode, 0, sizeof(struct qed_filter_params));
        rx_mode.type = QED_FILTER_TYPE_RX_MODE;
        rx_mode.filter.accept_flags = accept_flags;
        qdev->ops->filter_config(edev, &rx_mode);
}

static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        PMD_INIT_FUNC_TRACE(edev);

        enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;

        if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
                type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

        qede_rx_mode_setting(eth_dev, type);
}

static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        PMD_INIT_FUNC_TRACE(edev);

        if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
                qede_rx_mode_setting(eth_dev,
                                     QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
        else
                qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
}

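/* Periodic alarm callback used on CMT (2-engine) devices to poll slowpath
 * status blocks of both hwfns, since only one MSI-X vector is available.
 */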
static void qede_poll_sp_sb_cb(void *param)
{
        struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        int rc;

        qede_interrupt_action(ECORE_LEADING_HWFN(edev));
        qede_interrupt_action(&edev->hwfns[1]);

        rc = rte_eal_alarm_set(timer_period * US_PER_S,
                               qede_poll_sp_sb_cb,
                               (void *)eth_dev);
        if (rc != 0) {
                DP_ERR(edev, "Unable to start periodic"
                             " timer rc %d\n", rc);
                assert(false && "Unable to start periodic timer");
        }
}

static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        int rc;

        PMD_INIT_FUNC_TRACE(edev);

        /* dev_stop() cleans up fastpath resources in hw, but does not release
         * DMA memories and sw structures, so that dev_start() can be called
         * by the app without reconfiguration. In dev_close(), however, all
         * resources are released and the device must be brought up afresh.
         */
        if (qdev->state != QEDE_DEV_STOP)
                qede_dev_stop(eth_dev);
        else
                DP_INFO(edev, "Device is already stopped\n");

        rc = qdev->ops->vport_stop(edev, 0);
        if (rc != 0)
                DP_ERR(edev, "Failed to stop VPORT\n");

        qede_dealloc_fp_resc(eth_dev);

        qdev->ops->common->slowpath_stop(edev);

        qdev->ops->common->remove(edev);

        rte_intr_disable(&eth_dev->pci_dev->intr_handle);

        rte_intr_callback_unregister(&eth_dev->pci_dev->intr_handle,
                                     qede_interrupt_handler, (void *)eth_dev);

        if (edev->num_hwfns > 1)
                rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);

        qdev->state = QEDE_DEV_INIT; /* Go back to init state */
}

static void
qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct ecore_eth_stats stats;

        qdev->ops->get_vport_stats(edev, &stats);

        /* RX Stats */
        eth_stats->ipackets = stats.rx_ucast_pkts +
            stats.rx_mcast_pkts + stats.rx_bcast_pkts;

        eth_stats->ibytes = stats.rx_ucast_bytes +
            stats.rx_mcast_bytes + stats.rx_bcast_bytes;

        eth_stats->ierrors = stats.rx_crc_errors +
            stats.rx_align_errors +
            stats.rx_carrier_errors +
            stats.rx_oversize_packets +
            stats.rx_jabbers + stats.rx_undersize_packets;

        eth_stats->rx_nombuf = stats.no_buff_discards;

        eth_stats->imissed = stats.mftag_filter_discards +
            stats.mac_filter_discards +
            stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;

        /* TX stats */
        eth_stats->opackets = stats.tx_ucast_pkts +
            stats.tx_mcast_pkts + stats.tx_bcast_pkts;

        eth_stats->obytes = stats.tx_ucast_bytes +
            stats.tx_mcast_bytes + stats.tx_bcast_bytes;

        eth_stats->oerrors = stats.tx_err_drop_pkts;
}

static int
qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
                      struct rte_eth_xstat_name *xstats_names,
                      __rte_unused unsigned int limit)
{
        unsigned int i, stat_cnt = RTE_DIM(qede_xstats_strings);

        if (xstats_names != NULL)
                for (i = 0; i < stat_cnt; i++)
                        snprintf(xstats_names[i].name,
                                sizeof(xstats_names[i].name),
                                "%s",
                                qede_xstats_strings[i].name);

        return stat_cnt;
}

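/* Fill the caller's xstats array from a fresh vport statistics snapshot,
 * using the name/offset table above.
 */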
static int
qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                unsigned int n)
{
        struct qede_dev *qdev = dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct ecore_eth_stats stats;
        unsigned int i, num = RTE_DIM(qede_xstats_strings);

        if (n < num)
                return num;

        qdev->ops->get_vport_stats(edev, &stats);

        /* Iterate over the stat table, not the caller's array size, to avoid
         * reading past qede_xstats_strings when n exceeds the stat count.
         */
        for (i = 0; i < num; i++)
                xstats[i].value = *(u64 *)(((char *)&stats) +
                                           qede_xstats_strings[i].offset);

        return num;
}

static void
qede_reset_xstats(struct rte_eth_dev *dev)
{
        struct qede_dev *qdev = dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        ecore_reset_vport_stats(edev);
}

int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qed_link_params link_params;
        int rc;

        DP_INFO(edev, "setting link state %d\n", link_up);
        memset(&link_params, 0, sizeof(link_params));
        link_params.link_up = link_up;
        rc = qdev->ops->common->set_link(edev, &link_params);
        if (rc != ECORE_SUCCESS)
                DP_ERR(edev, "Unable to set link state %d\n", link_up);

        return rc;
}

static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
{
        return qede_dev_set_link_state(eth_dev, true);
}

static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
        return qede_dev_set_link_state(eth_dev, false);
}

static void qede_reset_stats(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        ecore_reset_vport_stats(edev);
}

static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
        enum qed_filter_rx_mode_type type =
            QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

        if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
                type |= QED_FILTER_RX_MODE_TYPE_PROMISC;

        qede_rx_mode_setting(eth_dev, type);
}

static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
        if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
                qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_PROMISC);
        else
                qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
}

static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
                              struct rte_eth_fc_conf *fc_conf)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qed_link_output current_link;
        struct qed_link_params params;

        memset(&current_link, 0, sizeof(current_link));
        qdev->ops->common->get_link(edev, &current_link);

        memset(&params, 0, sizeof(params));
        params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
        if (fc_conf->autoneg) {
                if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
                        DP_ERR(edev, "Autoneg not supported\n");
                        return -EINVAL;
                }
                params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
        }

        /* Pause is assumed to be supported (SUPPORTED_Pause) */
        if (fc_conf->mode == RTE_FC_FULL)
                params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
                                        QED_LINK_PAUSE_RX_ENABLE);
        if (fc_conf->mode == RTE_FC_TX_PAUSE)
                params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
        if (fc_conf->mode == RTE_FC_RX_PAUSE)
                params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;

        params.link_up = true;
        (void)qdev->ops->common->set_link(edev, &params);

        return 0;
}

static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
                              struct rte_eth_fc_conf *fc_conf)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qed_link_output current_link;

        memset(&current_link, 0, sizeof(current_link));
        qdev->ops->common->get_link(edev, &current_link);

        if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
                fc_conf->autoneg = true;

        if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
                                         QED_LINK_PAUSE_TX_ENABLE))
                fc_conf->mode = RTE_FC_FULL;
        else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
                fc_conf->mode = RTE_FC_RX_PAUSE;
        else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
                fc_conf->mode = RTE_FC_TX_PAUSE;
        else
                fc_conf->mode = RTE_FC_NONE;

        return 0;
}

static const uint32_t *
qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L3_IPV4,
                RTE_PTYPE_L3_IPV6,
                RTE_PTYPE_UNKNOWN
        };

        if (eth_dev->rx_pkt_burst == qede_recv_pkts)
                return ptypes;

        return NULL;
}

void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
{
        *rss_caps = 0;
        *rss_caps |= (hf & ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
        *rss_caps |= (hf & ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
        *rss_caps |= (hf & ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
        *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
        *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
        *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
}

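/* Update the RSS hash configuration: translate the rte_eth hash fields
 * into ecore RSS capabilities, optionally install a new hash key, and push
 * the result via a vport update.
 */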
static int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
                                struct rte_eth_rss_conf *rss_conf)
{
        struct qed_update_vport_params vport_update_params;
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        uint32_t *key = (uint32_t *)rss_conf->rss_key;
        uint64_t hf = rss_conf->rss_hf;
        int i;

        memset(&vport_update_params, 0, sizeof(vport_update_params));

        if (hf != 0) {
                /* Enable RSS */
                qede_init_rss_caps(&qdev->rss_params.rss_caps, hf);
                /* Store the new key before snapshotting the params so the
                 * vport update carries the updated key.
                 */
                if (key)
                        memcpy(qdev->rss_params.rss_key, rss_conf->rss_key,
                               rss_conf->rss_key_len);
                memcpy(&vport_update_params.rss_params, &qdev->rss_params,
                       sizeof(vport_update_params.rss_params));
                vport_update_params.update_rss_flg = 1;
                qdev->rss_enabled = 1;
        } else {
                /* Disable RSS */
                qdev->rss_enabled = 0;
        }

        /* If the mapping doesn't fit any supported, return */
        if (qdev->rss_params.rss_caps == 0 && hf != 0)
                return -EINVAL;

        DP_INFO(edev, "%s\n", (vport_update_params.update_rss_flg) ?
                                "Enabling RSS" : "Disabling RSS");

        vport_update_params.vport_id = 0;

        return qdev->ops->vport_update(edev, &vport_update_params);
}

int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
                           struct rte_eth_rss_conf *rss_conf)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        uint64_t hf;

        if (rss_conf->rss_key_len < sizeof(qdev->rss_params.rss_key))
                return -EINVAL;

        if (rss_conf->rss_key)
                memcpy(rss_conf->rss_key, qdev->rss_params.rss_key,
                       sizeof(qdev->rss_params.rss_key));

        hf = 0;
        hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4)     ?
                        ETH_RSS_IPV4 : 0;
        hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6)     ?
                        ETH_RSS_IPV6 : 0;
        hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6)     ?
                        ETH_RSS_IPV6_EX : 0;
        hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4_TCP) ?
                        ETH_RSS_NONFRAG_IPV4_TCP : 0;
        hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
                        ETH_RSS_NONFRAG_IPV6_TCP : 0;
        hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
                        ETH_RSS_IPV6_TCP_EX : 0;

        rss_conf->rss_hf = hf;

        return 0;
}

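/* RSS redirection table update/query; the hardware indirection table holds
 * ETH_RSS_RETA_SIZE_128 entries.
 */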
static int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
                                struct rte_eth_rss_reta_entry64 *reta_conf,
                                uint16_t reta_size)
{
        struct qed_update_vport_params vport_update_params;
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        uint16_t i, idx, shift;

        if (reta_size > ETH_RSS_RETA_SIZE_128) {
                DP_ERR(edev, "reta_size %d is not supported by hardware\n",
                       reta_size);
                return -EINVAL;
        }

        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift)) {
                        uint8_t entry = reta_conf[idx].reta[shift];
                        qdev->rss_params.rss_ind_table[i] = entry;
                }
        }

        /* Snapshot the params only after the indirection table has been
         * updated, so the vport update carries the new entries.
         */
        memset(&vport_update_params, 0, sizeof(vport_update_params));
        memcpy(&vport_update_params.rss_params, &qdev->rss_params,
               sizeof(vport_update_params.rss_params));

        vport_update_params.update_rss_flg = 1;
        vport_update_params.vport_id = 0;

        return qdev->ops->vport_update(edev, &vport_update_params);
}

int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        uint16_t i, idx, shift;

        if (reta_size > ETH_RSS_RETA_SIZE_128) {
                struct ecore_dev *edev = &qdev->edev;
                DP_ERR(edev, "reta_size %d is not supported\n",
                       reta_size);
                return -EINVAL;
        }

        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift)) {
                        uint8_t entry = qdev->rss_params.rss_ind_table[i];
                        reta_conf[idx].reta[shift] = entry;
                }
        }

        return 0;
}

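/* Change the MTU: validate the resulting frame size against the RX buffer
 * and max packet length, toggle jumbo mode accordingly, and restart the
 * port for the new value to take effect.
 */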
int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
        uint32_t frame_size;
        struct qede_dev *qdev = dev->data->dev_private;
        struct rte_eth_dev_info dev_info = {0};

        qede_dev_info_get(dev, &dev_info);

        /* VLAN_TAG = 4 */
        frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4;

        if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
                return -EINVAL;

        if (!dev->data->scattered_rx &&
            frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
                return -EINVAL;

        if (frame_size > ETHER_MAX_LEN)
                dev->data->dev_conf.rxmode.jumbo_frame = 1;
        else
                dev->data->dev_conf.rxmode.jumbo_frame = 0;

        /* update max frame size */
        dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
        qdev->mtu = mtu;
        qede_dev_stop(dev);
        qede_dev_start(dev);

        return 0;
}

static const struct eth_dev_ops qede_eth_dev_ops = {
        .dev_configure = qede_dev_configure,
        .dev_infos_get = qede_dev_info_get,
        .rx_queue_setup = qede_rx_queue_setup,
        .rx_queue_release = qede_rx_queue_release,
        .tx_queue_setup = qede_tx_queue_setup,
        .tx_queue_release = qede_tx_queue_release,
        .dev_start = qede_dev_start,
        .dev_set_link_up = qede_dev_set_link_up,
        .dev_set_link_down = qede_dev_set_link_down,
        .link_update = qede_link_update,
        .promiscuous_enable = qede_promiscuous_enable,
        .promiscuous_disable = qede_promiscuous_disable,
        .allmulticast_enable = qede_allmulticast_enable,
        .allmulticast_disable = qede_allmulticast_disable,
        .dev_stop = qede_dev_stop,
        .dev_close = qede_dev_close,
        .stats_get = qede_get_stats,
        .stats_reset = qede_reset_stats,
        .xstats_get = qede_get_xstats,
        .xstats_reset = qede_reset_xstats,
        .xstats_get_names = qede_get_xstats_names,
        .mac_addr_add = qede_mac_addr_add,
        .mac_addr_remove = qede_mac_addr_remove,
        .mac_addr_set = qede_mac_addr_set,
        .vlan_offload_set = qede_vlan_offload_set,
        .vlan_filter_set = qede_vlan_filter_set,
        .flow_ctrl_set = qede_flow_ctrl_set,
        .flow_ctrl_get = qede_flow_ctrl_get,
        .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
        .rss_hash_update = qede_rss_hash_update,
        .rss_hash_conf_get = qede_rss_hash_conf_get,
        .reta_update = qede_rss_reta_update,
        .reta_query = qede_rss_reta_query,
        .mtu_set = qede_set_mtu,
};

static const struct eth_dev_ops qede_eth_vf_dev_ops = {
        .dev_configure = qede_dev_configure,
        .dev_infos_get = qede_dev_info_get,
        .rx_queue_setup = qede_rx_queue_setup,
        .rx_queue_release = qede_rx_queue_release,
        .tx_queue_setup = qede_tx_queue_setup,
        .tx_queue_release = qede_tx_queue_release,
        .dev_start = qede_dev_start,
        .dev_set_link_up = qede_dev_set_link_up,
        .dev_set_link_down = qede_dev_set_link_down,
        .link_update = qede_link_update,
        .promiscuous_enable = qede_promiscuous_enable,
        .promiscuous_disable = qede_promiscuous_disable,
        .allmulticast_enable = qede_allmulticast_enable,
        .allmulticast_disable = qede_allmulticast_disable,
        .dev_stop = qede_dev_stop,
        .dev_close = qede_dev_close,
        .stats_get = qede_get_stats,
        .stats_reset = qede_reset_stats,
        .xstats_get = qede_get_xstats,
        .xstats_reset = qede_reset_xstats,
        .xstats_get_names = qede_get_xstats_names,
        .vlan_offload_set = qede_vlan_offload_set,
        .vlan_filter_set = qede_vlan_filter_set,
        .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
        .rss_hash_update = qede_rss_hash_update,
        .rss_hash_conf_get = qede_rss_hash_conf_get,
        .reta_update = qede_rss_reta_update,
        .reta_query = qede_rss_reta_query,
        .mtu_set = qede_set_mtu,
};

static void qede_update_pf_params(struct ecore_dev *edev)
{
        struct ecore_pf_params pf_params;
        /* 32 rx + 32 tx */
        memset(&pf_params, 0, sizeof(struct ecore_pf_params));
        pf_params.eth_pf_params.num_cons = 64;
        qed_ops->common->update_pf_params(edev, &pf_params);
}

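/* Common PF/VF init path: probe the device through qed, start the
 * slowpath, query device info and set up MAC address storage.
 */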
static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
        struct rte_pci_device *pci_dev;
        struct rte_pci_addr pci_addr;
        struct qede_dev *adapter;
        struct ecore_dev *edev;
        struct qed_dev_eth_info dev_info;
        struct qed_slowpath_params params;
        static bool do_once = true;
        uint8_t bulletin_change;
        uint8_t vf_mac[ETHER_ADDR_LEN];
        uint8_t is_mac_forced;
        bool is_mac_exist;
        /* Fix up ecore debug level */
        uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
        uint8_t dp_level = ECORE_LEVEL_VERBOSE;
        int rc;

        /* Extract key data structures */
        adapter = eth_dev->data->dev_private;
        edev = &adapter->edev;
        pci_addr = eth_dev->pci_dev->addr;

        PMD_INIT_FUNC_TRACE(edev);

        snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
                 pci_addr.bus, pci_addr.devid, pci_addr.function,
                 eth_dev->data->port_id);

        eth_dev->rx_pkt_burst = qede_recv_pkts;
        eth_dev->tx_pkt_burst = qede_xmit_pkts;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                DP_NOTICE(edev, false,
                          "Skipping device init from secondary process\n");
                return 0;
        }

        pci_dev = eth_dev->pci_dev;

        rte_eth_copy_pci_info(eth_dev, pci_dev);

        qed_ops = qed_get_eth_ops();
        if (!qed_ops) {
                DP_ERR(edev, "Failed to get qed_eth_ops\n");
                return -EINVAL;
        }

        DP_INFO(edev, "Starting qede probe\n");

        rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
                                    dp_module, dp_level, is_vf);
        if (rc != 0) {
                DP_ERR(edev, "qede probe failed rc %d\n", rc);
                return -ENODEV;
        }

        qede_update_pf_params(edev);

        rte_intr_callback_register(&eth_dev->pci_dev->intr_handle,
                                   qede_interrupt_handler, (void *)eth_dev);

        if (rte_intr_enable(&eth_dev->pci_dev->intr_handle)) {
                DP_ERR(edev, "rte_intr_enable() failed\n");
                return -ENODEV;
        }

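/* The drv_* fields and the QEDE_PMD_VER_PREFIX name below are combined
 * by the qed layer into the driver version string that is advertised to
 * the management firmware, so they must track the PMD version macros.
 */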
        /* Start the Slowpath-process */
        memset(&params, 0, sizeof(struct qed_slowpath_params));
        params.int_mode = ECORE_INT_MODE_MSIX;
        params.drv_major = QEDE_PMD_VERSION_MAJOR;
        params.drv_minor = QEDE_PMD_VERSION_MINOR;
        params.drv_rev = QEDE_PMD_VERSION_REVISION;
        params.drv_eng = QEDE_PMD_VERSION_PATCH;
        strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
                QEDE_PMD_DRV_VER_STR_SIZE);

        /* For CMT mode devices, poll for slowpath events periodically:
         * the uio device exposes only one MSI-X vector, but one is
         * needed per engine.
         */
        if (edev->num_hwfns > 1 && IS_PF(edev)) {
                rc = rte_eal_alarm_set(timer_period * US_PER_S,
                                       qede_poll_sp_sb_cb,
                                       (void *)eth_dev);
                if (rc != 0) {
                        DP_ERR(edev, "Unable to start periodic timer rc %d\n",
                               rc);
                        return -EINVAL;
                }
        }

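/* From here on every error path must also cancel the polling alarm
 * armed above; rte_eal_alarm_cancel() is harmless if it was never set.
 */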
        rc = qed_ops->common->slowpath_start(edev, &params);
        if (rc) {
                DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
                rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
                                     (void *)eth_dev);
                return -ENODEV;
        }

        rc = qed_ops->fill_dev_info(edev, &dev_info);
        if (rc) {
                DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
                qed_ops->common->slowpath_stop(edev);
                qed_ops->common->remove(edev);
                rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
                                     (void *)eth_dev);
                return -ENODEV;
        }

        qede_alloc_etherdev(adapter, &dev_info);

        adapter->ops->common->set_id(edev, edev->name, QEDE_PMD_VERSION);

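/* Number of unicast MAC filters owned by this function: a PF reads it
 * from its HW resource allocation, a VF queries it from the PF.
 */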
        if (!is_vf)
                adapter->dev_info.num_mac_addrs =
                        (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
                                           ECORE_MAC);
        else
                ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
                                             &adapter->dev_info.num_mac_addrs);

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
                                               ETHER_ADDR_LEN *
                                               adapter->dev_info.num_mac_addrs,
                                               RTE_CACHE_LINE_SIZE);
        if (eth_dev->data->mac_addrs == NULL) {
                DP_ERR(edev, "Failed to allocate MAC address array\n");
                qed_ops->common->slowpath_stop(edev);
                qed_ops->common->remove(edev);
                rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
                                     (void *)eth_dev);
                return -ENOMEM;
        }

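/* Program the primary MAC: a PF uses the permanent MAC from HW, a VF
 * uses the (forced) MAC the PF published through the bulletin board.
 */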
        if (!is_vf) {
                ether_addr_copy((struct ether_addr *)edev->hwfns[0].
                                hw_info.hw_mac_addr,
                                &eth_dev->data->mac_addrs[0]);
                ether_addr_copy(&eth_dev->data->mac_addrs[0],
                                &adapter->primary_mac);
        } else {
                ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
                                       &bulletin_change);
                if (bulletin_change) {
                        is_mac_exist =
                            ecore_vf_bulletin_get_forced_mac(
                                                ECORE_LEADING_HWFN(edev),
                                                vf_mac,
                                                &is_mac_forced);
                        if (is_mac_exist && is_mac_forced) {
                                DP_INFO(edev,
                                        "VF MAC address received from PF\n");
                                ether_addr_copy((struct ether_addr *)vf_mac,
                                                &eth_dev->data->mac_addrs[0]);
                                ether_addr_copy(&eth_dev->data->mac_addrs[0],
                                                &adapter->primary_mac);
                        } else {
                                DP_NOTICE(edev, false,
                                          "No VF MAC address assigned\n");
                        }
                }
        }

        eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;

        if (do_once) {
                qede_print_adapter_info(adapter);
                do_once = false;
        }

        adapter->state = QEDE_DEV_INIT;

        DP_NOTICE(edev, false, "MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n",
                  adapter->primary_mac.addr_bytes[0],
                  adapter->primary_mac.addr_bytes[1],
                  adapter->primary_mac.addr_bytes[2],
                  adapter->primary_mac.addr_bytes[3],
                  adapter->primary_mac.addr_bytes[4],
                  adapter->primary_mac.addr_bytes[5]);

        return rc;
}

static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
        return qede_common_dev_init(eth_dev, true);
}

static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
        return qede_common_dev_init(eth_dev, false);
}

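/* Common teardown for PF and VF ports: close the device, then drop the
 * ops and burst handlers so no further calls can reach freed state.
 */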
static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
        /* Only uninitialize in the primary process */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        /* Safe to close the device here */
        qede_dev_close(eth_dev);

        eth_dev->dev_ops = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;

        /* rte_free() accepts NULL, so no check is needed */
        rte_free(eth_dev->data->mac_addrs);
        eth_dev->data->mac_addrs = NULL;

        return 0;
}

static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
        return qede_dev_common_uninit(eth_dev);
}

static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
        return qede_dev_common_uninit(eth_dev);
}

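/* PCI device IDs claimed by this PMD; the VF table lists only SR-IOV
 * virtual functions of the 57980 family.
 */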
static const struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
        { QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_VF) },
        { QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_IOV) },
        { .vendor_id = 0, }
};

static const struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
        { QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980E) },
        { QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980S) },
        { QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_40) },
        { QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_25) },
        { QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_100) },
        { .vendor_id = 0, }
};

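/* eth_driver glue: binds the PCI id tables to the per-port init/uninit
 * hooks so the EAL PCI probe/remove paths can bring qede ports up and
 * down.
 */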
static struct eth_driver rte_qedevf_pmd = {
        .pci_drv = {
                .id_table = pci_id_qedevf_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
                .probe = rte_eth_dev_pci_probe,
                .remove = rte_eth_dev_pci_remove,
        },
        .eth_dev_init = qedevf_eth_dev_init,
        .eth_dev_uninit = qedevf_eth_dev_uninit,
        .dev_private_size = sizeof(struct qede_dev),
};

static struct eth_driver rte_qede_pmd = {
        .pci_drv = {
                .id_table = pci_id_qede_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
                .probe = rte_eth_dev_pci_probe,
                .remove = rte_eth_dev_pci_remove,
        },
        .eth_dev_init = qede_eth_dev_init,
        .eth_dev_uninit = qede_eth_dev_uninit,
        .dev_private_size = sizeof(struct qede_dev),
};

RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
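
/* With the registrations above, bound devices enumerate automatically;
 * e.g. (a sketch only, the PCI address is illustrative):
 *
 *      testpmd -c 0xf -n 4 -w 05:00.0 -- -i
 */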