/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_dev.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_kvargs.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_irq.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_nvm_defs.h"

#define DRV_MODULE_NAME		"bnxt"
static const char bnxt_version[] =
	"Broadcom NetXtreme driver " DRV_MODULE_NAME;
int bnxt_logtype_driver;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id bnxt_pci_id_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) },
	{ .vendor_id = 0, /* sentinel */ },
};

#define BNXT_ETH_RSS_SUPPORT (	\
	ETH_RSS_IPV4 |		\
	ETH_RSS_NONFRAG_IPV4_TCP |	\
	ETH_RSS_NONFRAG_IPV4_UDP |	\
	ETH_RSS_IPV6 |		\
	ETH_RSS_NONFRAG_IPV6_TCP |	\
	ETH_RSS_NONFRAG_IPV6_UDP)

#define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_VLAN_INSERT | \
				     DEV_TX_OFFLOAD_IPV4_CKSUM | \
				     DEV_TX_OFFLOAD_TCP_CKSUM | \
				     DEV_TX_OFFLOAD_UDP_CKSUM | \
				     DEV_TX_OFFLOAD_TCP_TSO | \
				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
				     DEV_TX_OFFLOAD_GRE_TNL_TSO | \
				     DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
				     DEV_TX_OFFLOAD_QINQ_INSERT | \
				     DEV_TX_OFFLOAD_MULTI_SEGS)

#define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
				     DEV_RX_OFFLOAD_VLAN_STRIP | \
				     DEV_RX_OFFLOAD_IPV4_CKSUM | \
				     DEV_RX_OFFLOAD_UDP_CKSUM | \
				     DEV_RX_OFFLOAD_TCP_CKSUM | \
				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
				     DEV_RX_OFFLOAD_JUMBO_FRAME | \
				     DEV_RX_OFFLOAD_KEEP_CRC | \
				     DEV_RX_OFFLOAD_VLAN_EXTEND | \
				     DEV_RX_OFFLOAD_TCP_LRO | \
				     DEV_RX_OFFLOAD_SCATTER | \
				     DEV_RX_OFFLOAD_RSS_HASH)

#define BNXT_DEVARG_TRUFLOW	"host-based-truflow"
#define BNXT_DEVARG_FLOW_XSTAT	"flow-xstat"
static const char *const bnxt_dev_args[] = {
	BNXT_DEVARG_TRUFLOW,
	BNXT_DEVARG_FLOW_XSTAT,
	NULL
};

/*
 * truflow == false to disable the feature
 * truflow == true to enable the feature
 */
#define BNXT_DEVARG_TRUFLOW_INVALID(truflow)	((truflow) > 1)

/*
 * flow_xstat == false to disable the feature
 * flow_xstat == true to enable the feature
 */
#define BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)	((flow_xstat) > 1)
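
/*
 * Example usage (hypothetical PCI address): the devargs above are passed
 * as a comma-separated list after the device on the EAL command line,
 * e.g.:
 *
 *     dpdk-testpmd -w 0000:0d:00.0,host-based-truflow=1,flow-xstat=1 -- -i
 *
 * Any value other than 0 or 1 is rejected by the _INVALID() checks above.
 */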

static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev);
static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev);
static void bnxt_cancel_fw_health_check(struct bnxt *bp);
static int bnxt_restore_vlan_filters(struct bnxt *bp);
static void bnxt_dev_recover(void *arg);
static void bnxt_free_error_recovery_info(struct bnxt *bp);

int is_bnxt_in_error(struct bnxt *bp)
{
	if (bp->flags & BNXT_FLAG_FATAL_ERROR)
		return -EIO;
	if (bp->flags & BNXT_FLAG_FW_RESET)
		return -EBUSY;

	return 0;
}

/***********************/

/*
 * High level utility functions
 */

static uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
{
	if (!BNXT_CHIP_THOR(bp))
		return 1;

	return RTE_ALIGN_MUL_CEIL(bp->rx_nr_rings,
				  BNXT_RSS_ENTRIES_PER_CTX_THOR) /
				    BNXT_RSS_ENTRIES_PER_CTX_THOR;
}
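
/*
 * The return expression above is a ceiling division: e.g., assuming
 * BNXT_RSS_ENTRIES_PER_CTX_THOR is 64, 80 Rx rings would need
 * ceil(80 / 64) = 2 RSS contexts.
 */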

static uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
{
	if (!BNXT_CHIP_THOR(bp))
		return HW_HASH_INDEX_SIZE;

	return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR;
}

static void bnxt_free_pf_info(struct bnxt *bp)
{
	rte_free(bp->pf);
}

static void bnxt_free_link_info(struct bnxt *bp)
{
	rte_free(bp->link_info);
}

static void bnxt_free_leds_info(struct bnxt *bp)
{
	rte_free(bp->leds);
	bp->leds = NULL;
}

static void bnxt_free_flow_stats_info(struct bnxt *bp)
{
	rte_free(bp->flow_stat);
	bp->flow_stat = NULL;
}

static void bnxt_free_cos_queues(struct bnxt *bp)
{
	rte_free(bp->rx_cos_queue);
	rte_free(bp->tx_cos_queue);
}

static void bnxt_free_mem(struct bnxt *bp, bool reconfig)
{
	bnxt_free_flow_stats_info(bp);

	bnxt_free_filter_mem(bp);
	bnxt_free_vnic_attributes(bp);
	bnxt_free_vnic_mem(bp);

	/* tx/rx rings are configured as part of *_queue_setup callbacks.
	 * If the number of rings change across fw update,
	 * we don't have much choice except to warn the user.
	 */
	if (!reconfig) {
		bnxt_free_stats(bp);
		bnxt_free_tx_rings(bp);
		bnxt_free_rx_rings(bp);
	}
	bnxt_free_async_cp_ring(bp);
	bnxt_free_rxtx_nq_ring(bp);

	rte_free(bp->grp_info);
	bp->grp_info = NULL;
}

static int bnxt_alloc_pf_info(struct bnxt *bp)
{
	bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0);
	if (bp->pf == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_link_info(struct bnxt *bp)
{
	bp->link_info =
		rte_zmalloc("bnxt_link_info", sizeof(struct bnxt_link_info), 0);
	if (bp->link_info == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_leds_info(struct bnxt *bp)
{
	bp->leds = rte_zmalloc("bnxt_leds",
			       BNXT_MAX_LED * sizeof(struct bnxt_led_info),
			       0);
	if (bp->leds == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_cos_queues(struct bnxt *bp)
{
	bp->rx_cos_queue =
		rte_zmalloc("bnxt_rx_cosq",
			    BNXT_COS_QUEUE_COUNT *
			    sizeof(struct bnxt_cos_queue_info),
			    0);
	if (bp->rx_cos_queue == NULL)
		return -ENOMEM;

	bp->tx_cos_queue =
		rte_zmalloc("bnxt_tx_cosq",
			    BNXT_COS_QUEUE_COUNT *
			    sizeof(struct bnxt_cos_queue_info),
			    0);
	if (bp->tx_cos_queue == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_flow_stats_info(struct bnxt *bp)
{
	bp->flow_stat = rte_zmalloc("bnxt_flow_xstat",
				    sizeof(struct bnxt_flow_stat_info), 0);
	if (bp->flow_stat == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig)
{
	int rc;

	rc = bnxt_alloc_ring_grps(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_async_ring_struct(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_filter_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_async_cp_ring(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_rxtx_nq_ring(bp);
	if (rc)
		goto alloc_mem_err;

	if (BNXT_FLOW_XSTATS_EN(bp)) {
		rc = bnxt_alloc_flow_stats_info(bp);
		if (rc)
			goto alloc_mem_err;
	}

	return 0;

alloc_mem_err:
	bnxt_free_mem(bp, reconfig);
	return rc;
}

static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	uint64_t rx_offloads = dev_conf->rxmode.offloads;
	struct bnxt_rx_queue *rxq;
	unsigned int j;
	int rc;

	rc = bnxt_vnic_grp_alloc(bp, vnic);
	if (rc)
		goto err_out;

	PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
		    vnic_id, vnic, vnic->fw_grp_ids);

	rc = bnxt_hwrm_vnic_alloc(bp, vnic);
	if (rc)
		goto err_out;

	/* Alloc RSS context only if RSS mode is enabled */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
		int j, nr_ctxs = bnxt_rss_ctxts(bp);

		rc = 0;
		for (j = 0; j < nr_ctxs; j++) {
			rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j);
			if (rc)
				break;
		}
		if (rc) {
			PMD_DRV_LOG(ERR,
				    "HWRM vnic %d ctx %d alloc failure rc: %x\n",
				    vnic_id, j, rc);
			goto err_out;
		}
		vnic->num_lb_ctxts = nr_ctxs;
	}

	/*
	 * Firmware sets pf pair in default vnic cfg. If the VLAN strip
	 * setting is not available at this time, it will not be
	 * configured correctly in the CFA.
	 */
	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		vnic->vlan_strip = true;
	else
		vnic->vlan_strip = false;

	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
	if (rc)
		goto err_out;

	rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
	if (rc)
		goto err_out;

	for (j = 0; j < bp->rx_num_qs_per_vnic; j++) {
		rxq = bp->eth_dev->data->rx_queues[j];

		PMD_DRV_LOG(DEBUG,
			    "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n",
			    j, rxq->vnic, rxq->vnic->fw_grp_ids);

		if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start)
			rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
		else
			vnic->rx_queue_cnt++;
	}

	PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt);

	rc = bnxt_vnic_rss_configure(bp, vnic);
	if (rc)
		goto err_out;

	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
	else
		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);

	return 0;
err_out:
	PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
		    vnic_id, rc);
	return rc;
}

static int bnxt_register_fc_ctx_mem(struct bnxt *bp)
{
	int rc = 0;

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma,
				&bp->flow_stat->rx_fc_in_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p"
		    " rx_fc_in_tbl.ctx_id = %d\n",
		    bp->flow_stat->rx_fc_in_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma),
		    bp->flow_stat->rx_fc_in_tbl.ctx_id);

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma,
				&bp->flow_stat->rx_fc_out_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p"
		    " rx_fc_out_tbl.ctx_id = %d\n",
		    bp->flow_stat->rx_fc_out_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma),
		    bp->flow_stat->rx_fc_out_tbl.ctx_id);

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma,
				&bp->flow_stat->tx_fc_in_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p"
		    " tx_fc_in_tbl.ctx_id = %d\n",
		    bp->flow_stat->tx_fc_in_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma),
		    bp->flow_stat->tx_fc_in_tbl.ctx_id);

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma,
				&bp->flow_stat->tx_fc_out_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p"
		    " tx_fc_out_tbl.ctx_id = %d\n",
		    bp->flow_stat->tx_fc_out_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma),
		    bp->flow_stat->tx_fc_out_tbl.ctx_id);

	memset(bp->flow_stat->rx_fc_out_tbl.va,
	       0,
	       bp->flow_stat->rx_fc_out_tbl.size);
	rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
				       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
				       bp->flow_stat->rx_fc_out_tbl.ctx_id,
				       bp->flow_stat->max_fc,
				       true);
	if (rc)
		return rc;

	memset(bp->flow_stat->tx_fc_out_tbl.va,
	       0,
	       bp->flow_stat->tx_fc_out_tbl.size);
	rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
				       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
				       bp->flow_stat->tx_fc_out_tbl.ctx_id,
				       bp->flow_stat->max_fc,
				       true);

	return rc;
}

static int bnxt_alloc_ctx_mem_buf(char *type, size_t size,
				  struct bnxt_ctx_mem_buf_info *ctx)
{
	if (!ctx)
		return -EINVAL;

	ctx->va = rte_zmalloc(type, size, 0);
	if (ctx->va == NULL)
		return -ENOMEM;
	rte_mem_lock_page(ctx->va);
	ctx->size = size;
	ctx->dma = rte_mem_virt2iova(ctx->va);
	if (ctx->dma == RTE_BAD_IOVA)
		return -ENOMEM;

	return 0;
}
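
/*
 * Note: rte_mem_virt2iova() yields the IO address the NIC must use for
 * DMA; RTE_BAD_IOVA means the page could not be translated, so the
 * buffer cannot safely be handed to firmware and allocation fails.
 */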

static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
{
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];
	uint16_t max_fc;
	int rc = 0;

	max_fc = bp->flow_stat->max_fc;

	sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 4 bytes for each counter-id */
	rc = bnxt_alloc_ctx_mem_buf(type,
				    max_fc * 4,
				    &bp->flow_stat->rx_fc_in_tbl);
	if (rc)
		return rc;

	sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
	rc = bnxt_alloc_ctx_mem_buf(type,
				    max_fc * 16,
				    &bp->flow_stat->rx_fc_out_tbl);
	if (rc)
		return rc;

	sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 4 bytes for each counter-id */
	rc = bnxt_alloc_ctx_mem_buf(type,
				    max_fc * 4,
				    &bp->flow_stat->tx_fc_in_tbl);
	if (rc)
		return rc;

	sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
	rc = bnxt_alloc_ctx_mem_buf(type,
				    max_fc * 16,
				    &bp->flow_stat->tx_fc_out_tbl);
	if (rc)
		return rc;

	rc = bnxt_register_fc_ctx_mem(bp);

	return rc;
}

static int bnxt_init_ctx_mem(struct bnxt *bp)
{
	int rc = 0;

	if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) ||
	    !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) ||
	    !BNXT_FLOW_XSTATS_EN(bp))
		return 0;

	rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc);
	if (rc)
		return rc;

	rc = bnxt_init_fc_ctx_mem(bp);

	return rc;
}

static int bnxt_init_chip(struct bnxt *bp)
{
	struct rte_eth_link new;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	uint32_t queue_id, base = BNXT_MISC_VEC_ID;
	uint32_t vec = BNXT_MISC_VEC_ID;
	unsigned int i, j;
	int rc;

	if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
		bp->eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
		bp->flags |= BNXT_FLAG_JUMBO;
	} else {
		bp->eth_dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;
		bp->flags &= ~BNXT_FLAG_JUMBO;
	}

	/* THOR does not support ring groups.
	 * But we will use the array to save RSS context IDs.
	 */
	if (BNXT_CHIP_THOR(bp))
		bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;

	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_hwrm_rings(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
		goto err_out;
	}

	if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
		goto skip_cosq_cfg;

	for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
		if (bp->rx_cos_queue[i].id != 0xff) {
			struct bnxt_vnic_info *vnic = &bp->vnic_info[j++];

			if (!vnic) {
				PMD_DRV_LOG(ERR,
					    "Num pools more than FW profile\n");
				rc = -EINVAL;
				goto err_out;
			}
			vnic->cos_queue_id = bp->rx_cos_queue[i].id;
			bp->rx_cosq_cnt++;
		}
	}

skip_cosq_cfg:
	rc = bnxt_mq_rx_configure(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
		goto err_out;
	}

	/* VNIC configuration */
	for (i = 0; i < bp->nr_vnics; i++) {
		rc = bnxt_setup_one_vnic(bp, i);
		if (rc)
			goto err_out;
	}

	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR,
			"HWRM cfa l2 rx mask failure rc: %x\n", rc);
		goto err_out;
	}

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
	    bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = bp->eth_dev->data->nb_rx_queues;
		PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
		if (intr_vector > bp->rx_cp_nr_rings) {
			PMD_DRV_LOG(ERR, "At most %d intr queues supported\n",
					bp->rx_cp_nr_rings);
			return -ENOTSUP;
		}
		rc = rte_intr_efd_enable(intr_handle, intr_vector);
		if (rc)
			return rc;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    bp->eth_dev->data->nb_rx_queues *
				    sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
				" intr_vec\n", bp->eth_dev->data->nb_rx_queues);
			rc = -ENOMEM;
			goto err_disable;
		}
		PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
			"intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
			intr_handle->intr_vec, intr_handle->nb_efd,
			intr_handle->max_intr);
		for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
		     queue_id++) {
			intr_handle->intr_vec[queue_id] =
							vec + BNXT_RX_VEC_START;
			if (vec < base + intr_handle->nb_efd - 1)
				vec++;
		}
	}

	/* enable uio/vfio intr/eventfd mapping */
	rc = rte_intr_enable(intr_handle);
#ifndef RTE_EXEC_ENV_FREEBSD
	/* In FreeBSD OS, nic_uio driver does not support interrupts */
	if (rc)
		goto err_free;
#endif

	rc = bnxt_get_hwrm_link_config(bp, &new);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc);
		goto err_free;
	}

	if (!bp->link_info->link_up) {
		rc = bnxt_set_hwrm_link_config(bp, true);
		if (rc) {
			PMD_DRV_LOG(ERR,
				"HWRM link config failure rc: %x\n", rc);
			goto err_free;
		}
	}
	bnxt_print_link_info(bp->eth_dev);

	bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0);
	if (!bp->mark_table)
		PMD_DRV_LOG(ERR, "Allocation of mark table failed\n");

	return 0;

err_free:
	rte_free(intr_handle->intr_vec);
err_disable:
	rte_intr_efd_disable(intr_handle);
err_out:
	/* Some of the error status returned by FW may not be from errno.h */
	if (rc > 0)
		rc = -EIO;

	return rc;
}

static int bnxt_shutdown_nic(struct bnxt *bp)
{
	bnxt_free_all_hwrm_resources(bp);
	bnxt_free_all_filters(bp);
	bnxt_free_all_vnics(bp);
	return 0;
}

/*
 * Device configuration and status function
 */

uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
{
	uint32_t link_speed = bp->link_info->support_speeds;
	uint32_t speed_capa = 0;

	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB)
		speed_capa |= ETH_LINK_SPEED_100M;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD)
		speed_capa |= ETH_LINK_SPEED_100M_HD;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB)
		speed_capa |= ETH_LINK_SPEED_1G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
		speed_capa |= ETH_LINK_SPEED_2_5G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB)
		speed_capa |= ETH_LINK_SPEED_10G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
		speed_capa |= ETH_LINK_SPEED_20G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
		speed_capa |= ETH_LINK_SPEED_25G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB)
		speed_capa |= ETH_LINK_SPEED_40G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB)
		speed_capa |= ETH_LINK_SPEED_50G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
		speed_capa |= ETH_LINK_SPEED_100G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_200GB)
		speed_capa |= ETH_LINK_SPEED_200G;

	if (bp->link_info->auto_mode ==
	    HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
		speed_capa |= ETH_LINK_SPEED_FIXED;
	else
		speed_capa |= ETH_LINK_SPEED_AUTONEG;

	return speed_capa;
}

static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
				struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
	struct bnxt *bp = eth_dev->data->dev_private;
	uint16_t max_vnics, i, j, vpool, vrxq;
	unsigned int max_rx_rings;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* MAC Specifics */
	dev_info->max_mac_addrs = bp->max_l2_ctx;
	dev_info->max_hash_mac_addrs = 0;

	/* PF/VF specifics */
	if (BNXT_PF(bp))
		dev_info->max_vfs = pdev->max_vfs;

	max_rx_rings = BNXT_MAX_RINGS(bp);
	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
	dev_info->max_rx_queues = max_rx_rings;
	dev_info->max_tx_queues = max_rx_rings;
	dev_info->reta_size = bnxt_rss_hash_tbl_size(bp);
	dev_info->hash_key_size = 40;
	max_vnics = bp->max_vnics;

	/* MTU specifics */
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->max_mtu = BNXT_MAX_MTU;

	/* Fast path specifics */
	dev_info->min_rx_bufsize = 1;
	dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;

	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;

	dev_info->speed_capa = bnxt_get_speed_capabilities(bp);

	/* *INDENT-OFF* */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = 8,
			.hthresh = 8,
			.wthresh = 0,
		},
		.rx_free_thresh = 32,
		/* If no descriptors available, pkts are dropped by default */
		.rx_drop_en = 1,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = 32,
			.hthresh = 0,
			.wthresh = 0,
		},
		.tx_free_thresh = 32,
		.tx_rs_thresh = 32,
	};
	eth_dev->data->dev_conf.intr_conf.lsc = 1;

	eth_dev->data->dev_conf.intr_conf.rxq = 1;
	dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
	dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
	dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
	dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;

	/* *INDENT-ON* */

	/*
	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
	 *       need further investigation.
	 */

	/* VMDq resources */
	vpool = 64; /* ETH_64_POOLS */
	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
	for (i = 0; i < 4; vpool >>= 1, i++) {
		if (max_vnics > vpool) {
			for (j = 0; j < 5; vrxq >>= 1, j++) {
				if (dev_info->max_rx_queues > vrxq) {
					if (vpool > vrxq)
						vpool = vrxq;
					goto found;
				}
			}
			/* Not enough resources to support VMDq */
			break;
		}
	}
	/* Not enough resources to support VMDq */
	vpool = 0;
	vrxq = 0;
found:
	dev_info->max_vmdq_pools = vpool;
	dev_info->vmdq_queue_num = vrxq;

	dev_info->vmdq_pool_base = 0;
	dev_info->vmdq_queue_base = 0;

	return 0;
}

/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	int rc;

	bp->rx_queues = (void *)eth_dev->data->rx_queues;
	bp->tx_queues = (void *)eth_dev->data->tx_queues;
	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
		rc = bnxt_hwrm_check_vf_rings(bp);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
			return -ENOSPC;
		}

		/* If a resource has already been allocated - in this case
		 * it is the async completion ring, free it. Reallocate it after
		 * resource reservation. This will ensure the resource counts
		 * are calculated correctly.
		 */

		pthread_mutex_lock(&bp->def_cp_lock);

		if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
			bnxt_disable_int(bp);
			bnxt_free_cp_ring(bp, bp->async_cp_ring);
		}

		rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
			pthread_mutex_unlock(&bp->def_cp_lock);
			return -ENOSPC;
		}

		if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
			rc = bnxt_alloc_async_cp_ring(bp);
			if (rc) {
				pthread_mutex_unlock(&bp->def_cp_lock);
				return rc;
			}
			bnxt_enable_int(bp);
		}

		pthread_mutex_unlock(&bp->def_cp_lock);
	} else {
		/* legacy driver needs to get updated values */
		rc = bnxt_hwrm_func_qcaps(bp);
		if (rc) {
			PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc);
			return rc;
		}
	}

	/* Inherit new configurations */
	if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
	    eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues
		+ BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings ||
	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
	    bp->max_stat_ctx)
		goto resource_error;

	if (BNXT_HAS_RING_GRPS(bp) &&
	    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
		goto resource_error;

	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
	    bp->max_vnics < eth_dev->data->nb_rx_queues)
		goto resource_error;

	bp->rx_cp_nr_rings = bp->rx_nr_rings;
	bp->tx_cp_nr_rings = bp->tx_nr_rings;

	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
	eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		eth_dev->data->mtu =
			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
			RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
			BNXT_NUM_VLANS;
		bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
	}
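	/*
	 * Worked example for the MTU derivation above (hypothetical values):
	 * with max_rx_pkt_len = 9018, subtracting RTE_ETHER_HDR_LEN (14),
	 * RTE_ETHER_CRC_LEN (4) and two VLAN tags (2 * 4) gives an MTU of
	 * 9018 - 26 = 8992.
	 */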
	return 0;

resource_error:
	PMD_DRV_LOG(ERR,
		    "Insufficient resources to support requested config\n");
	PMD_DRV_LOG(ERR,
		    "Num Queues Requested: Tx %d, Rx %d\n",
		    eth_dev->data->nb_tx_queues,
		    eth_dev->data->nb_rx_queues);
	PMD_DRV_LOG(ERR,
		    "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
		    bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
		    bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
	return -ENOSPC;
}

static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	if (link->link_status)
		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
			eth_dev->data->port_id,
			(uint32_t)link->link_speed,
			(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
1046                         ("full-duplex") : ("half-duplex\n"));
	else
		PMD_DRV_LOG(INFO, "Port %d Link Down\n",
			eth_dev->data->port_id);
}

/*
 * Determine whether the current configuration requires support for scattered
 * receive; return 1 if scattered receive is required and 0 if not.
 */
static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
{
	uint16_t buf_size;
	int i;

	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
		return 1;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i];

		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
				      RTE_PKTMBUF_HEADROOM);
		if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size)
			return 1;
	}
	return 0;
}
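
/*
 * Worked example (hypothetical mempool): with a 2176-byte mbuf data room
 * and the default RTE_PKTMBUF_HEADROOM of 128, buf_size is 2048; any
 * max_rx_pkt_len larger than 2048 (e.g. 9000-byte jumbo frames) forces
 * scattered receive.
 */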

static eth_rx_burst_t
bnxt_receive_function(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

#ifdef RTE_ARCH_X86
#ifndef RTE_LIBRTE_IEEE1588
	/*
	 * Vector mode receive can be enabled only if scatter rx is not
	 * in use and rx offloads are limited to VLAN stripping and
	 * CRC stripping.
	 */
	if (!eth_dev->data->scattered_rx &&
	    !(eth_dev->data->dev_conf.rxmode.offloads &
	      ~(DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_KEEP_CRC |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_RSS_HASH |
		DEV_RX_OFFLOAD_VLAN_FILTER)) &&
	    !BNXT_TRUFLOW_EN(bp)) {
		PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n",
			    eth_dev->data->port_id);
		bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE;
		return bnxt_recv_pkts_vec;
	}
	PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n",
		    eth_dev->data->port_id);
	PMD_DRV_LOG(INFO,
		    "Port %d scatter: %d rx offload: %" PRIX64 "\n",
		    eth_dev->data->port_id,
		    eth_dev->data->scattered_rx,
		    eth_dev->data->dev_conf.rxmode.offloads);
#endif
#endif
	bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
	return bnxt_recv_pkts;
}

static eth_tx_burst_t
bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
{
#ifdef RTE_ARCH_X86
#ifndef RTE_LIBRTE_IEEE1588
	/*
	 * Vector mode transmit can be enabled only if not using scatter rx
	 * or tx offloads.
	 */
	if (!eth_dev->data->scattered_rx &&
	    !eth_dev->data->dev_conf.txmode.offloads) {
		PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n",
			    eth_dev->data->port_id);
		return bnxt_xmit_pkts_vec;
	}
	PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n",
		    eth_dev->data->port_id);
	PMD_DRV_LOG(INFO,
		    "Port %d scatter: %d tx offload: %" PRIX64 "\n",
		    eth_dev->data->port_id,
		    eth_dev->data->scattered_rx,
		    eth_dev->data->dev_conf.txmode.offloads);
#endif
#endif
	return bnxt_xmit_pkts;
}

static int bnxt_handle_if_change_status(struct bnxt *bp)
{
	int rc;

	/* Since fw has undergone a reset and lost all contexts,
	 * set fatal flag to not issue hwrm during cleanup
	 */
	bp->flags |= BNXT_FLAG_FATAL_ERROR;
	bnxt_uninit_resources(bp, true);

	/* clear fatal flag so that re-init happens */
	bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
	rc = bnxt_init_resources(bp, true);

	bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;

	return rc;
}

static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	int vlan_mask = 0;
	int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT;

	if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Queues are not configured yet!\n");
		return -EINVAL;
	}

	if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
		PMD_DRV_LOG(ERR,
			"RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
			bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	}

	do {
		rc = bnxt_hwrm_if_change(bp, true);
		if (rc == 0 || rc != -EAGAIN)
			break;

		rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL);
	} while (retry_cnt--);

	if (rc)
		return rc;

	if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) {
		rc = bnxt_handle_if_change_status(bp);
		if (rc)
			return rc;
	}

	bnxt_enable_int(bp);

	rc = bnxt_init_chip(bp);
	if (rc)
		goto error;

	eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);
	eth_dev->data->dev_started = 1;

	bnxt_link_update(eth_dev, 1, ETH_LINK_UP);

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
		vlan_mask |= ETH_VLAN_FILTER_MASK;
	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		vlan_mask |= ETH_VLAN_STRIP_MASK;
	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
	if (rc)
		goto error;

	eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
	eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);

	pthread_mutex_lock(&bp->def_cp_lock);
	bnxt_schedule_fw_health_check(bp);
	pthread_mutex_unlock(&bp->def_cp_lock);

	if (BNXT_TRUFLOW_EN(bp))
		bnxt_ulp_init(bp);

	return 0;

error:
	bnxt_shutdown_nic(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	bnxt_hwrm_if_change(bp, false);
	eth_dev->data->dev_started = 0;
	return rc;
}

static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	int rc = 0;

	if (!bp->link_info->link_up)
		rc = bnxt_set_hwrm_link_config(bp, true);
	if (!rc)
		eth_dev->data->dev_link.link_status = 1;

	bnxt_print_link_info(eth_dev);
	return rc;
}

static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = 0;
	bnxt_set_hwrm_link_config(bp, false);
	bp->link_info->link_up = 0;

	return 0;
}

/* Unload the driver, release resources */
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (BNXT_TRUFLOW_EN(bp))
		bnxt_ulp_deinit(bp);

	eth_dev->data->dev_started = 0;
	/* Prevent crashes when queues are still in use */
	eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;

	bnxt_disable_int(bp);

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	bnxt_cancel_fw_health_check(bp);

	bnxt_dev_set_link_down_op(eth_dev);

	/* Wait for link to be reset and the async notification to process.
	 * During reset recovery, there is no need to wait and
	 * VF/NPAR functions do not have privilege to change PHY config.
	 */
	if (!is_bnxt_in_error(bp) && BNXT_SINGLE_PF(bp))
		bnxt_link_update(eth_dev, 1, ETH_LINK_DOWN);

	/* Clean queue intr-vector mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	bnxt_hwrm_port_clr_stats(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	/* Process any remaining notifications in default completion queue */
	bnxt_int_handler(eth_dev);
	bnxt_shutdown_nic(bp);
	bnxt_hwrm_if_change(bp, false);

	rte_free(bp->mark_table);
	bp->mark_table = NULL;

	bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
	bp->rx_cosq_cnt = 0;
	/* All filters are deleted on a port stop. */
	if (BNXT_FLOW_XSTATS_EN(bp))
		bp->flow_stat->flow_count = 0;
}

static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	/* cancel the recovery handler before remove dev */
	rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp);
	rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp);
	bnxt_cancel_fc_thread(bp);

	if (eth_dev->data->dev_started)
		bnxt_dev_stop_op(eth_dev);

	bnxt_uninit_resources(bp, false);

	/* Free the VF info before bp->pf itself is released below */
	rte_free(bp->pf->vf_info);
	bp->pf->vf_info = NULL;

	bnxt_free_leds_info(bp);
	bnxt_free_cos_queues(bp);
	bnxt_free_link_info(bp);
	bnxt_free_pf_info(bp);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
	bp->tx_mem_zone = NULL;
	rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
	bp->rx_mem_zone = NULL;

	rte_free(bp->grp_info);
	bp->grp_info = NULL;
}

static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
				    uint32_t index)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	uint32_t i;

	if (is_bnxt_in_error(bp))
		return;

	/*
	 * Loop through all VNICs from the specified filter flow pools to
	 * remove the corresponding MAC addr filter
	 */
	for (i = 0; i < bp->nr_vnics; i++) {
		if (!(pool_mask & (1ULL << i)))
			continue;

		vnic = &bp->vnic_info[i];
		filter = STAILQ_FIRST(&vnic->filter);
		while (filter) {
			temp_filter = STAILQ_NEXT(filter, next);
			if (filter->mac_index == index) {
				STAILQ_REMOVE(&vnic->filter, filter,
						bnxt_filter_info, next);
				bnxt_hwrm_clear_l2_filter(bp, filter);
				bnxt_free_filter(bp, filter);
			}
			filter = temp_filter;
		}
	}
}

static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic,
			       struct rte_ether_addr *mac_addr, uint32_t index,
			       uint32_t pool)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	/* Attach requested MAC address to the new l2_filter */
	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->mac_index == index) {
			PMD_DRV_LOG(DEBUG,
				    "MAC addr already existed for pool %d\n",
				    pool);
			return 0;
		}
	}

	filter = bnxt_alloc_filter(bp);
	if (!filter) {
		PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
		return -ENODEV;
	}

	/* bnxt_alloc_filter copies default MAC to filter->l2_addr. So,
	 * if the MAC that's been programmed now is a different one, then,
	 * copy that addr to filter->l2_addr
	 */
	if (mac_addr)
		memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN);
	filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;

	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
	if (!rc) {
		filter->mac_index = index;
		if (filter->mac_index == 0)
			STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
		else
			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
	} else {
		bnxt_free_filter(bp, filter);
	}

	return rc;
}

static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
				struct rte_ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;
1446         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1447                 PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
1448                 return -ENOTSUP;
1449         }
1450
1451         if (!vnic) {
1452                 PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
1453                 return -EINVAL;
1454         }
1455
1456         /* Filter settings will get applied when port is started */
1457         if (!eth_dev->data->dev_started)
1458                 return 0;
1459
1460         rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool);
1461
1462         return rc;
1463 }
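/*
 * Usage sketch (illustrative only, not part of the driver): applications
 * reach this op through the generic ethdev API; port_id and the address
 * below are placeholders.
 *
 *	struct rte_ether_addr mac = {
 *		.addr_bytes = { 0x00, 0x0a, 0xf7, 0x00, 0x00, 0x01 }
 *	};
 *
 *	// Attach the MAC to pool 0; the PMD programs an L2 filter on the
 *	// pool's VNIC once the port is started.
 *	rte_eth_dev_mac_addr_add(port_id, &mac, 0);
 */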
1464
1465 int bnxt_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete,
1466                      bool exp_link_status)
1467 {
1468         int rc = 0;
1469         struct bnxt *bp = eth_dev->data->dev_private;
1470         struct rte_eth_link new;
1471         int cnt = exp_link_status ? BNXT_LINK_UP_WAIT_CNT :
1472                   BNXT_LINK_DOWN_WAIT_CNT;
1473
1474         rc = is_bnxt_in_error(bp);
1475         if (rc)
1476                 return rc;
1477
1478         memset(&new, 0, sizeof(new));
1479         do {
1480                 /* Retrieve link info from hardware */
1481                 rc = bnxt_get_hwrm_link_config(bp, &new);
1482                 if (rc) {
1483                         new.link_speed = ETH_SPEED_NUM_100M;
1484                         new.link_duplex = ETH_LINK_FULL_DUPLEX;
1485                         PMD_DRV_LOG(ERR,
1486                                 "Failed to retrieve link rc = 0x%x!\n", rc);
1487                         goto out;
1488                 }
1489
1490                 if (!wait_to_complete || new.link_status == exp_link_status)
1491                         break;
1492
1493                 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
1494         } while (cnt--);
1495
1496 out:
1497         /* Timed out or success */
1498         if (new.link_status != eth_dev->data->dev_link.link_status ||
1499             new.link_speed != eth_dev->data->dev_link.link_speed) {
1500                 rte_eth_linkstatus_set(eth_dev, &new);
1501
1502                 _rte_eth_dev_callback_process(eth_dev,
1503                                               RTE_ETH_EVENT_INTR_LSC,
1504                                               NULL);
1505
1506                 bnxt_print_link_info(eth_dev);
1507         }
1508
1509         return rc;
1510 }
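/*
 * Usage sketch (illustrative only): applications consume the result of this
 * polling through the generic link query; port_id is a placeholder.
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(port_id, &link);  // single query, no wait
 *	rte_eth_link_get(port_id, &link);         // may wait for link up
 *	if (link.link_status == ETH_LINK_UP)
 *		printf("link up at %u Mbps\n", link.link_speed);
 */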
1511
1512 static int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
1513                                int wait_to_complete)
1514 {
1515         return bnxt_link_update(eth_dev, wait_to_complete, ETH_LINK_UP);
1516 }
1517
1518 static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
1519 {
1520         struct bnxt *bp = eth_dev->data->dev_private;
1521         struct bnxt_vnic_info *vnic;
1522         uint32_t old_flags;
1523         int rc;
1524
1525         rc = is_bnxt_in_error(bp);
1526         if (rc)
1527                 return rc;
1528
1529         /* Filter settings will get applied when port is started */
1530         if (!eth_dev->data->dev_started)
1531                 return 0;
1532
1533         if (bp->vnic_info == NULL)
1534                 return 0;
1535
1536         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1537
1538         old_flags = vnic->flags;
1539         vnic->flags |= BNXT_VNIC_INFO_PROMISC;
1540         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1541         if (rc != 0)
1542                 vnic->flags = old_flags;
1543
1544         return rc;
1545 }
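/*
 * Usage sketch (illustrative only): these rx-mode ops (promiscuous and
 * allmulticast enable/disable) are driven by the generic ethdev calls;
 * port_id is a placeholder.
 *
 *	rte_eth_promiscuous_enable(port_id);   // sets BNXT_VNIC_INFO_PROMISC
 *	rte_eth_allmulticast_enable(port_id);  // sets BNXT_VNIC_INFO_ALLMULTI
 *	rte_eth_promiscuous_disable(port_id);
 */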
1546
1547 static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
1548 {
1549         struct bnxt *bp = eth_dev->data->dev_private;
1550         struct bnxt_vnic_info *vnic;
1551         uint32_t old_flags;
1552         int rc;
1553
1554         rc = is_bnxt_in_error(bp);
1555         if (rc)
1556                 return rc;
1557
1558         /* Filter settings will get applied when port is started */
1559         if (!eth_dev->data->dev_started)
1560                 return 0;
1561
1562         if (bp->vnic_info == NULL)
1563                 return 0;
1564
1565         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1566
1567         old_flags = vnic->flags;
1568         vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
1569         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1570         if (rc != 0)
1571                 vnic->flags = old_flags;
1572
1573         return rc;
1574 }
1575
1576 static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
1577 {
1578         struct bnxt *bp = eth_dev->data->dev_private;
1579         struct bnxt_vnic_info *vnic;
1580         uint32_t old_flags;
1581         int rc;
1582
1583         rc = is_bnxt_in_error(bp);
1584         if (rc)
1585                 return rc;
1586
1587         /* Filter settings will get applied when port is started */
1588         if (!eth_dev->data->dev_started)
1589                 return 0;
1590
1591         if (bp->vnic_info == NULL)
1592                 return 0;
1593
1594         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1595
1596         old_flags = vnic->flags;
1597         vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
1598         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1599         if (rc != 0)
1600                 vnic->flags = old_flags;
1601
1602         return rc;
1603 }
1604
1605 static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
1606 {
1607         struct bnxt *bp = eth_dev->data->dev_private;
1608         struct bnxt_vnic_info *vnic;
1609         uint32_t old_flags;
1610         int rc;
1611
1612         rc = is_bnxt_in_error(bp);
1613         if (rc)
1614                 return rc;
1615
1616         /* Filter settings will get applied when port is started */
1617         if (!eth_dev->data->dev_started)
1618                 return 0;
1619
1620         if (bp->vnic_info == NULL)
1621                 return 0;
1622
1623         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1624
1625         old_flags = vnic->flags;
1626         vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
1627         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1628         if (rc != 0)
1629                 vnic->flags = old_flags;
1630
1631         return rc;
1632 }
1633
1634 /* Return bnxt_rx_queue pointer corresponding to a given rxq. */
1635 static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid)
1636 {
1637         if (qid >= bp->rx_nr_rings)
1638                 return NULL;
1639
1640         return bp->eth_dev->data->rx_queues[qid];
1641 }
1642
1643 /* Return rxq corresponding to a given rss table ring/group ID. */
1644 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr)
1645 {
1646         struct bnxt_rx_queue *rxq;
1647         unsigned int i;
1648
1649         if (!BNXT_HAS_RING_GRPS(bp)) {
1650                 for (i = 0; i < bp->rx_nr_rings; i++) {
1651                         rxq = bp->eth_dev->data->rx_queues[i];
1652                         if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr)
1653                                 return rxq->index;
1654                 }
1655         } else {
1656                 for (i = 0; i < bp->rx_nr_rings; i++) {
1657                         if (bp->grp_info[i].fw_grp_id == fwr)
1658                                 return i;
1659                 }
1660         }
1661
1662         return INVALID_HW_RING_ID;
1663 }
1664
1665 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
1666                             struct rte_eth_rss_reta_entry64 *reta_conf,
1667                             uint16_t reta_size)
1668 {
1669         struct bnxt *bp = eth_dev->data->dev_private;
1670         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1671         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1672         uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
1673         uint16_t idx, sft;
1674         int i, rc;
1675
1676         rc = is_bnxt_in_error(bp);
1677         if (rc)
1678                 return rc;
1679
1680         if (!vnic->rss_table)
1681                 return -EINVAL;
1682
1683         if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
1684                 return -EINVAL;
1685
1686         if (reta_size != tbl_size) {
1687                 PMD_DRV_LOG(ERR, "The configured hash table lookup size "
1688                         "(%d) must equal the size supported by the hardware "
1689                         "(%d)\n", reta_size, tbl_size);
1690                 return -EINVAL;
1691         }
1692
1693         for (i = 0; i < reta_size; i++) {
1694                 struct bnxt_rx_queue *rxq;
1695
1696                 idx = i / RTE_RETA_GROUP_SIZE;
1697                 sft = i % RTE_RETA_GROUP_SIZE;
1698
1699                 if (!(reta_conf[idx].mask & (1ULL << sft)))
1700                         continue;
1701
1702                 rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]);
1703                 if (!rxq) {
1704                         PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n");
1705                         return -EINVAL;
1706                 }
1707
1708                 if (BNXT_CHIP_THOR(bp)) {
1709                         vnic->rss_table[i * 2] =
1710                                 rxq->rx_ring->rx_ring_struct->fw_ring_id;
1711                         vnic->rss_table[i * 2 + 1] =
1712                                 rxq->cp_ring->cp_ring_struct->fw_ring_id;
1713                 } else {
1714                         vnic->rss_table[i] =
1715                             vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
1716                 }
1717         }
1718
1719         rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1720         return rc;
1721 }
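/*
 * Worked example (illustrative only): the RETA is passed in groups of
 * RTE_RETA_GROUP_SIZE (64) entries. For entry i = 67, idx = 67 / 64 = 1 and
 * sft = 67 % 64 = 3, so the entry is honored only when bit 3 of
 * reta_conf[1].mask is set. A minimal update redirecting every entry to
 * queue 0, assuming reta_size (from rte_eth_dev_info.reta_size) <= 256:
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[4];
 *	uint16_t i;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < reta_size; i++) {
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] = 0;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
 */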
1722
1723 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
1724                               struct rte_eth_rss_reta_entry64 *reta_conf,
1725                               uint16_t reta_size)
1726 {
1727         struct bnxt *bp = eth_dev->data->dev_private;
1728         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1729         uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
1730         uint16_t idx, sft, i;
1731         int rc;
1732
1733         rc = is_bnxt_in_error(bp);
1734         if (rc)
1735                 return rc;
1736
1737         /* Retrieve from the default VNIC */
1738         if (!vnic)
1739                 return -EINVAL;
1740         if (!vnic->rss_table)
1741                 return -EINVAL;
1742
1743         if (reta_size != tbl_size) {
1744                 PMD_DRV_LOG(ERR, "The configured hash table lookup size "
1745                         "(%d) must equal the size supported by the hardware "
1746                         "(%d)\n", reta_size, tbl_size);
1747                 return -EINVAL;
1748         }
1749
1750         for (idx = 0, i = 0; i < reta_size; i++) {
1751                 idx = i / RTE_RETA_GROUP_SIZE;
1752                 sft = i % RTE_RETA_GROUP_SIZE;
1753
1754                 if (reta_conf[idx].mask & (1ULL << sft)) {
1755                         uint16_t qid;
1756
1757                         if (BNXT_CHIP_THOR(bp))
1758                                 qid = bnxt_rss_to_qid(bp,
1759                                                       vnic->rss_table[i * 2]);
1760                         else
1761                                 qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);
1762
1763                         if (qid == INVALID_HW_RING_ID) {
1764                                 PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n");
1765                                 return -EINVAL;
1766                         }
1767                         reta_conf[idx].reta[sft] = qid;
1768                 }
1769         }
1770
1771         return 0;
1772 }
1773
1774 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
1775                                    struct rte_eth_rss_conf *rss_conf)
1776 {
1777         struct bnxt *bp = eth_dev->data->dev_private;
1778         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1779         struct bnxt_vnic_info *vnic;
1780         int rc;
1781
1782         rc = is_bnxt_in_error(bp);
1783         if (rc)
1784                 return rc;
1785
1786         /*
1787          * If the requested RSS enablement differs from what was set at
1788          * dev_configure time, return -EINVAL
1789          */
1790         if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
1791                 if (!rss_conf->rss_hf)
1792                         PMD_DRV_LOG(ERR, "Hash type NONE\n");
1793         } else {
1794                 if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
1795                         return -EINVAL;
1796         }
1797
1798         bp->flags |= BNXT_FLAG_UPDATE_HASH;
1799         memcpy(&eth_dev->data->dev_conf.rx_adv_conf.rss_conf,
1800                rss_conf,
1801                sizeof(*rss_conf));
1802
1803         /* Update the default RSS VNIC(s) */
1804         vnic = BNXT_GET_DEFAULT_VNIC(bp);
1805         vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
1806
1807         /*
1808          * If hashkey is not specified, use the previously configured
1809          * hashkey
1810          */
1811         if (!rss_conf->rss_key)
1812                 goto rss_config;
1813
1814         if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) {
1815                 PMD_DRV_LOG(ERR,
1816                             "Invalid hashkey length, should be %d bytes\n", HW_HASH_KEY_SIZE);
1817                 return -EINVAL;
1818         }
1819         memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len);
1820
1821 rss_config:
1822         rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1823         return rc;
1824 }
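/*
 * Usage sketch (illustrative only): a hash key, when supplied, must be
 * exactly HW_HASH_KEY_SIZE bytes; the key contents and port_id below are
 * placeholders.
 *
 *	uint8_t key[HW_HASH_KEY_SIZE] = { 0x6d, 0x5a, ... };  // fill all bytes
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = key,
 *		.rss_key_len = sizeof(key),
 *		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
 *	};
 *
 *	rte_eth_dev_rss_hash_update(port_id, &conf);
 */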
1825
1826 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
1827                                      struct rte_eth_rss_conf *rss_conf)
1828 {
1829         struct bnxt *bp = eth_dev->data->dev_private;
1830         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
1831         int len, rc;
1832         uint32_t hash_types;
1833
1834         rc = is_bnxt_in_error(bp);
1835         if (rc)
1836                 return rc;
1837
1838         /* RSS configuration is the same for all VNICs */
1839         if (vnic && vnic->rss_hash_key) {
1840                 if (rss_conf->rss_key) {
1841                         len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
1842                               rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
1843                         memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
1844                 }
1845
1846                 hash_types = vnic->hash_type;
1847                 rss_conf->rss_hf = 0;
1848                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
1849                         rss_conf->rss_hf |= ETH_RSS_IPV4;
1850                         hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
1851                 }
1852                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
1853                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1854                         hash_types &=
1855                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
1856                 }
1857                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
1858                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1859                         hash_types &=
1860                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
1861                 }
1862                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
1863                         rss_conf->rss_hf |= ETH_RSS_IPV6;
1864                         hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
1865                 }
1866                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
1867                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1868                         hash_types &=
1869                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
1870                 }
1871                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
1872                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1873                         hash_types &=
1874                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
1875                 }
1876                 if (hash_types) {
1877                         PMD_DRV_LOG(ERR,
1878                                 "Unknown RSS config from firmware (%08x), RSS disabled\n",
1879                                 vnic->hash_type);
1880                         return -ENOTSUP;
1881                 }
1882         } else {
1883                 rss_conf->rss_hf = 0;
1884         }
1885         return 0;
1886 }
1887
1888 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
1889                                struct rte_eth_fc_conf *fc_conf)
1890 {
1891         struct bnxt *bp = dev->data->dev_private;
1892         struct rte_eth_link link_info;
1893         int rc;
1894
1895         rc = is_bnxt_in_error(bp);
1896         if (rc)
1897                 return rc;
1898
1899         rc = bnxt_get_hwrm_link_config(bp, &link_info);
1900         if (rc)
1901                 return rc;
1902
1903         memset(fc_conf, 0, sizeof(*fc_conf));
1904         if (bp->link_info->auto_pause)
1905                 fc_conf->autoneg = 1;
1906         switch (bp->link_info->pause) {
1907         case 0:
1908                 fc_conf->mode = RTE_FC_NONE;
1909                 break;
1910         case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
1911                 fc_conf->mode = RTE_FC_TX_PAUSE;
1912                 break;
1913         case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
1914                 fc_conf->mode = RTE_FC_RX_PAUSE;
1915                 break;
1916         case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
1917                         HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
1918                 fc_conf->mode = RTE_FC_FULL;
1919                 break;
1920         }
1921         return 0;
1922 }
1923
1924 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
1925                                struct rte_eth_fc_conf *fc_conf)
1926 {
1927         struct bnxt *bp = dev->data->dev_private;
1928         int rc;
1929
1930         rc = is_bnxt_in_error(bp);
1931         if (rc)
1932                 return rc;
1933
1934         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
1935                 PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
1936                 return -ENOTSUP;
1937         }
1938
1939         switch (fc_conf->mode) {
1940         case RTE_FC_NONE:
1941                 bp->link_info->auto_pause = 0;
1942                 bp->link_info->force_pause = 0;
1943                 break;
1944         case RTE_FC_RX_PAUSE:
1945                 if (fc_conf->autoneg) {
1946                         bp->link_info->auto_pause =
1947                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1948                         bp->link_info->force_pause = 0;
1949                 } else {
1950                         bp->link_info->auto_pause = 0;
1951                         bp->link_info->force_pause =
1952                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1953                 }
1954                 break;
1955         case RTE_FC_TX_PAUSE:
1956                 if (fc_conf->autoneg) {
1957                         bp->link_info->auto_pause =
1958                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
1959                         bp->link_info->force_pause = 0;
1960                 } else {
1961                         bp->link_info->auto_pause = 0;
1962                         bp->link_info->force_pause =
1963                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
1964                 }
1965                 break;
1966         case RTE_FC_FULL:
1967                 if (fc_conf->autoneg) {
1968                         bp->link_info->auto_pause =
1969                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
1970                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1971                         bp->link_info->force_pause = 0;
1972                 } else {
1973                         bp->link_info->auto_pause = 0;
1974                         bp->link_info->force_pause =
1975                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
1976                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1977                 }
1978                 break;
1979         }
1980         return bnxt_set_hwrm_link_config(bp, true);
1981 }
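/*
 * Usage sketch (illustrative only): valid only on a single-function PF;
 * port_id is a placeholder.
 *
 *	struct rte_eth_fc_conf fc;
 *
 *	memset(&fc, 0, sizeof(fc));
 *	fc.mode = RTE_FC_FULL;  // pause frames in both directions
 *	fc.autoneg = 1;         // negotiate rather than force
 *	rte_eth_dev_flow_ctrl_set(port_id, &fc);
 */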
1982
1983 /* Add UDP tunneling port */
1984 static int
1985 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
1986                          struct rte_eth_udp_tunnel *udp_tunnel)
1987 {
1988         struct bnxt *bp = eth_dev->data->dev_private;
1989         uint16_t tunnel_type = 0;
1990         int rc = 0;
1991
1992         rc = is_bnxt_in_error(bp);
1993         if (rc)
1994                 return rc;
1995
1996         switch (udp_tunnel->prot_type) {
1997         case RTE_TUNNEL_TYPE_VXLAN:
1998                 if (bp->vxlan_port_cnt) {
1999                         PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
2000                                 udp_tunnel->udp_port);
2001                         if (bp->vxlan_port != udp_tunnel->udp_port) {
2002                                 PMD_DRV_LOG(ERR, "Only one port allowed\n");
2003                                 return -ENOSPC;
2004                         }
2005                         bp->vxlan_port_cnt++;
2006                         return 0;
2007                 }
2008                 tunnel_type =
2009                         HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
2010                 bp->vxlan_port_cnt++;
2011                 break;
2012         case RTE_TUNNEL_TYPE_GENEVE:
2013                 if (bp->geneve_port_cnt) {
2014                         PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
2015                                 udp_tunnel->udp_port);
2016                         if (bp->geneve_port != udp_tunnel->udp_port) {
2017                                 PMD_DRV_LOG(ERR, "Only one port allowed\n");
2018                                 return -ENOSPC;
2019                         }
2020                         bp->geneve_port_cnt++;
2021                         return 0;
2022                 }
2023                 tunnel_type =
2024                         HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
2025                 bp->geneve_port_cnt++;
2026                 break;
2027         default:
2028                 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
2029                 return -ENOTSUP;
2030         }
2031         rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
2032                                              tunnel_type);
2033         return rc;
2034 }
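/*
 * Usage sketch (illustrative only): only one VXLAN and one GENEVE port can
 * be programmed at a time, and repeated adds of the same port are reference
 * counted. port_id is a placeholder.
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,                    // IANA VXLAN port
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *
 *	rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */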
2035
2036 static int
2037 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
2038                          struct rte_eth_udp_tunnel *udp_tunnel)
2039 {
2040         struct bnxt *bp = eth_dev->data->dev_private;
2041         uint16_t tunnel_type = 0;
2042         uint16_t port = 0;
2043         int rc = 0;
2044
2045         rc = is_bnxt_in_error(bp);
2046         if (rc)
2047                 return rc;
2048
2049         switch (udp_tunnel->prot_type) {
2050         case RTE_TUNNEL_TYPE_VXLAN:
2051                 if (!bp->vxlan_port_cnt) {
2052                         PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
2053                         return -EINVAL;
2054                 }
2055                 if (bp->vxlan_port != udp_tunnel->udp_port) {
2056                         PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
2057                                 udp_tunnel->udp_port, bp->vxlan_port);
2058                         return -EINVAL;
2059                 }
2060                 if (--bp->vxlan_port_cnt)
2061                         return 0;
2062
2063                 tunnel_type =
2064                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
2065                 port = bp->vxlan_fw_dst_port_id;
2066                 break;
2067         case RTE_TUNNEL_TYPE_GENEVE:
2068                 if (!bp->geneve_port_cnt) {
2069                         PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
2070                         return -EINVAL;
2071                 }
2072                 if (bp->geneve_port != udp_tunnel->udp_port) {
2073                         PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
2074                                 udp_tunnel->udp_port, bp->geneve_port);
2075                         return -EINVAL;
2076                 }
2077                 if (--bp->geneve_port_cnt)
2078                         return 0;
2079
2080                 tunnel_type =
2081                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
2082                 port = bp->geneve_fw_dst_port_id;
2083                 break;
2084         default:
2085                 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
2086                 return -ENOTSUP;
2087         }
2088
2089         rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
2090         if (!rc) {
2091                 if (tunnel_type ==
2092                     HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
2093                         bp->vxlan_port = 0;
2094                 if (tunnel_type ==
2095                     HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
2096                         bp->geneve_port = 0;
2097         }
2098         return rc;
2099 }
2100
2101 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
2102 {
2103         struct bnxt_filter_info *filter;
2104         struct bnxt_vnic_info *vnic;
2105         int rc = 0;
2106         uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
2107
2108         vnic = BNXT_GET_DEFAULT_VNIC(bp);
2109         filter = STAILQ_FIRST(&vnic->filter);
2110         while (filter) {
2111                 /* Search for this matching MAC+VLAN filter */
2112                 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) {
2113                         /* Delete the filter */
2114                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2115                         if (rc)
2116                                 return rc;
2117                         STAILQ_REMOVE(&vnic->filter, filter,
2118                                       bnxt_filter_info, next);
2119                         bnxt_free_filter(bp, filter);
2120                         PMD_DRV_LOG(INFO,
2121                                     "Deleted vlan filter for %d\n",
2122                                     vlan_id);
2123                         return 0;
2124                 }
2125                 filter = STAILQ_NEXT(filter, next);
2126         }
2127         return -ENOENT;
2128 }
2129
2130 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
2131 {
2132         struct bnxt_filter_info *filter;
2133         struct bnxt_vnic_info *vnic;
2134         int rc = 0;
2135         uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
2136                 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
2137         uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
2138
2139         /* Implementation notes on the use of VNIC in this command:
2140          *
2141          * By default, these filters belong to default vnic for the function.
2142          * Once these filters are set up, only destination VNIC can be modified.
2143          * If the destination VNIC is not specified in this command,
2144          * then the HWRM shall only create an l2 context id.
2145          */
2146
2147         vnic = BNXT_GET_DEFAULT_VNIC(bp);
2148         filter = STAILQ_FIRST(&vnic->filter);
2149         /* Check if the VLAN has already been added */
2150         while (filter) {
2151                 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id))
2152                         return -EEXIST;
2153
2154                 filter = STAILQ_NEXT(filter, next);
2155         }
2156
2157         /* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC
2158          * command to create MAC+VLAN filter with the right flags, enables set.
2159          */
2160         filter = bnxt_alloc_filter(bp);
2161         if (!filter) {
2162                 PMD_DRV_LOG(ERR,
2163                             "MAC/VLAN filter alloc failed\n");
2164                 return -ENOMEM;
2165         }
2166         /* MAC + VLAN ID filter */
2167         /* If l2_ivlan == 0 and l2_ivlan_mask != 0, only
2168          * untagged packets are received
2169          *
2170          * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged
2171          * packets and packets tagged with the programmed vlan are received
2172          */
2173         filter->l2_ivlan = vlan_id;
2174         filter->l2_ivlan_mask = 0x0FFF;
2175         filter->enables |= en;
2176         filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
2177
2178         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
2179         if (rc) {
2180                 /* Free the newly allocated filter as we were
2181                  * not able to create the filter in hardware.
2182                  */
2183                 bnxt_free_filter(bp, filter);
2184                 return rc;
2185         }
2186
2187         filter->mac_index = 0;
2188         /* Add this new filter to the list */
2189         if (vlan_id == 0)
2190                 STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
2191         else
2192                 STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
2193
2194         PMD_DRV_LOG(INFO,
2195                     "Added Vlan filter for %d\n", vlan_id);
2196         return rc;
2197 }
2198
2199 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
2200                 uint16_t vlan_id, int on)
2201 {
2202         struct bnxt *bp = eth_dev->data->dev_private;
2203         int rc;
2204
2205         rc = is_bnxt_in_error(bp);
2206         if (rc)
2207                 return rc;
2208
2209         if (!eth_dev->data->dev_started) {
2210                 PMD_DRV_LOG(ERR, "port must be started before setting vlan\n");
2211                 return -EINVAL;
2212         }
2213
2214         /* These operations apply to ALL existing MAC/VLAN filters */
2215         if (on)
2216                 return bnxt_add_vlan_filter(bp, vlan_id);
2217         else
2218                 return bnxt_del_vlan_filter(bp, vlan_id);
2219 }
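/*
 * Usage sketch (illustrative only): the port must be started first, and
 * DEV_RX_OFFLOAD_VLAN_FILTER must be enabled for the filter to take effect.
 * port_id is a placeholder.
 *
 *	rte_eth_dev_vlan_filter(port_id, 100, 1);  // accept VLAN 100
 *	rte_eth_dev_vlan_filter(port_id, 100, 0);  // remove it again
 */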
2220
2221 static int bnxt_del_dflt_mac_filter(struct bnxt *bp,
2222                                     struct bnxt_vnic_info *vnic)
2223 {
2224         struct bnxt_filter_info *filter;
2225         int rc;
2226
2227         filter = STAILQ_FIRST(&vnic->filter);
2228         while (filter) {
2229                 if (filter->mac_index == 0 &&
2230                     !memcmp(filter->l2_addr, bp->mac_addr,
2231                             RTE_ETHER_ADDR_LEN)) {
2232                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2233                         if (!rc) {
2234                                 STAILQ_REMOVE(&vnic->filter, filter,
2235                                               bnxt_filter_info, next);
2236                                 bnxt_free_filter(bp, filter);
2237                         }
2238                         return rc;
2239                 }
2240                 filter = STAILQ_NEXT(filter, next);
2241         }
2242         return 0;
2243 }
2244
2245 static int
2246 bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
2247 {
2248         struct bnxt_vnic_info *vnic;
2249         unsigned int i;
2250         int rc;
2251
2252         vnic = BNXT_GET_DEFAULT_VNIC(bp);
2253         if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
2254                 /* Remove any VLAN filters programmed */
2255                 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
2256                         bnxt_del_vlan_filter(bp, i);
2257
2258                 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0);
2259                 if (rc)
2260                         return rc;
2261         } else {
2262                 /* Default filter will allow packets that match the
2263                  * dest mac. So, it has to be deleted; otherwise, we
2264                  * will end up receiving vlan packets for which no
2265                  * filter is programmed when the hw-vlan-filter
2266                  * configuration is ON
2267                  */
2268                 bnxt_del_dflt_mac_filter(bp, vnic);
2269                 /* This filter will allow only untagged packets */
2270                 bnxt_add_vlan_filter(bp, 0);
2271         }
2272         PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
2273                     !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
2274
2275         return 0;
2276 }
2277
2278 static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id)
2279 {
2280         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2281         unsigned int i;
2282         int rc;
2283
2284         /* Destroy vnic filters and vnic */
2285         if (bp->eth_dev->data->dev_conf.rxmode.offloads &
2286             DEV_RX_OFFLOAD_VLAN_FILTER) {
2287                 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
2288                         bnxt_del_vlan_filter(bp, i);
2289         }
2290         bnxt_del_dflt_mac_filter(bp, vnic);
2291
2292         rc = bnxt_hwrm_vnic_free(bp, vnic);
2293         if (rc)
2294                 return rc;
2295
2296         rte_free(vnic->fw_grp_ids);
2297         vnic->fw_grp_ids = NULL;
2298
2299         vnic->rx_queue_cnt = 0;
2300
2301         return 0;
2302 }
2303
2304 static int
2305 bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
2306 {
2307         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
2308         int rc;
2309
2310         /* Destroy, recreate and reconfigure the default vnic */
2311         rc = bnxt_free_one_vnic(bp, 0);
2312         if (rc)
2313                 return rc;
2314
2315         /* default vnic 0 */
2316         rc = bnxt_setup_one_vnic(bp, 0);
2317         if (rc)
2318                 return rc;
2319
2320         if (bp->eth_dev->data->dev_conf.rxmode.offloads &
2321             DEV_RX_OFFLOAD_VLAN_FILTER) {
2322                 rc = bnxt_add_vlan_filter(bp, 0);
2323                 if (rc)
2324                         return rc;
2325                 rc = bnxt_restore_vlan_filters(bp);
2326                 if (rc)
2327                         return rc;
2328         } else {
2329                 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0);
2330                 if (rc)
2331                         return rc;
2332         }
2333
2334         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2335         if (rc)
2336                 return rc;
2337
2338         PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
2339                     !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
2340
2341         return rc;
2342 }
2343
2344 static int
2345 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
2346 {
2347         uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
2348         struct bnxt *bp = dev->data->dev_private;
2349         int rc;
2350
2351         rc = is_bnxt_in_error(bp);
2352         if (rc)
2353                 return rc;
2354
2355         /* Filter settings will get applied when port is started */
2356         if (!dev->data->dev_started)
2357                 return 0;
2358
2359         if (mask & ETH_VLAN_FILTER_MASK) {
2360                 /* Enable or disable VLAN filtering */
2361                 rc = bnxt_config_vlan_hw_filter(bp, rx_offloads);
2362                 if (rc)
2363                         return rc;
2364         }
2365
2366         if (mask & ETH_VLAN_STRIP_MASK) {
2367                 /* Enable or disable VLAN stripping */
2368                 rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads);
2369                 if (rc)
2370                         return rc;
2371         }
2372
2373         if (mask & ETH_VLAN_EXTEND_MASK) {
2374                 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2375                         PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n");
2376                 else
2377                         PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n");
2378         }
2379
2380         return 0;
2381 }
2382
2383 static int
2384 bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
2385                       uint16_t tpid)
2386 {
2387         struct bnxt *bp = dev->data->dev_private;
2388         int qinq = dev->data->dev_conf.rxmode.offloads &
2389                    DEV_RX_OFFLOAD_VLAN_EXTEND;
2390
2391         if (vlan_type != ETH_VLAN_TYPE_INNER &&
2392             vlan_type != ETH_VLAN_TYPE_OUTER) {
2393                 PMD_DRV_LOG(ERR,
2394                             "Unsupported vlan type.");
2395                 return -EINVAL;
2396         }
2397         if (!qinq) {
2398                 PMD_DRV_LOG(ERR,
2399                             "QinQ not enabled. It must be ON since we can "
2400                             "accelerate only the outer vlan\n");
2401                 return -EINVAL;
2402         }
2403
2404         if (vlan_type == ETH_VLAN_TYPE_OUTER) {
2405                 switch (tpid) {
2406                 case RTE_ETHER_TYPE_QINQ:
2407                         bp->outer_tpid_bd =
2408                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8;
2409                         break;
2410                 case RTE_ETHER_TYPE_VLAN:
2411                         bp->outer_tpid_bd =
2412                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
2413                         break;
2414                 case 0x9100:
2415                         bp->outer_tpid_bd =
2416                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100;
2417                         break;
2418                 case 0x9200:
2419                         bp->outer_tpid_bd =
2420                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200;
2421                         break;
2422                 case 0x9300:
2423                         bp->outer_tpid_bd =
2424                                 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300;
2425                         break;
2426                 default:
2427                         PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid);
2428                         return -EINVAL;
2429                 }
2430                 bp->outer_tpid_bd |= tpid;
2431                 PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd);
2432         } else if (vlan_type == ETH_VLAN_TYPE_INNER) {
2433                 PMD_DRV_LOG(ERR,
2434                             "Can accelerate only outer vlan in QinQ\n");
2435                 return -EINVAL;
2436         }
2437
2438         return 0;
2439 }
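/*
 * Usage sketch (illustrative only): requires DEV_RX_OFFLOAD_VLAN_EXTEND
 * (QinQ) to be enabled, and only the outer TPID can be changed. port_id is
 * a placeholder.
 *
 *	rte_eth_dev_set_vlan_ether_type(port_id, ETH_VLAN_TYPE_OUTER,
 *					RTE_ETHER_TYPE_QINQ);  // 0x88a8
 */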
2440
2441 static int
2442 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
2443                              struct rte_ether_addr *addr)
2444 {
2445         struct bnxt *bp = dev->data->dev_private;
2446         /* Default Filter is tied to VNIC 0 */
2447         struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
2448         int rc;
2449
2450         rc = is_bnxt_in_error(bp);
2451         if (rc)
2452                 return rc;
2453
2454         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
2455                 return -EPERM;
2456
2457         if (rte_is_zero_ether_addr(addr))
2458                 return -EINVAL;
2459
2460         /* Filter settings will get applied when port is started */
2461         if (!dev->data->dev_started)
2462                 return 0;
2463
2464         /* Check if the requested MAC is already added */
2465         if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0)
2466                 return 0;
2467
2468         /* Destroy filter and re-create it */
2469         bnxt_del_dflt_mac_filter(bp, vnic);
2470
2471         memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN);
2472         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
2473                 /* This filter will allow only untagged packets */
2474                 rc = bnxt_add_vlan_filter(bp, 0);
2475         } else {
2476                 rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0);
2477         }
2478
2479         PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
2480         return rc;
2481 }
2482
2483 static int
2484 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
2485                           struct rte_ether_addr *mc_addr_set,
2486                           uint32_t nb_mc_addr)
2487 {
2488         struct bnxt *bp = eth_dev->data->dev_private;
2489         char *mc_addr_list = (char *)mc_addr_set;
2490         struct bnxt_vnic_info *vnic;
2491         uint32_t off = 0, i = 0;
2492         int rc;
2493
2494         rc = is_bnxt_in_error(bp);
2495         if (rc)
2496                 return rc;
2497
2498         vnic = BNXT_GET_DEFAULT_VNIC(bp);
2499
2500         if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
2501                 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
2502                 goto allmulti;
2503         }
2504
2505         /* TODO Check for Duplicate mcast addresses */
2506         vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
2507         for (i = 0; i < nb_mc_addr; i++) {
2508                 memcpy(vnic->mc_list + off, mc_addr_list + off,
2509                         RTE_ETHER_ADDR_LEN);
2510                 off += RTE_ETHER_ADDR_LEN;
2511         }
2512
2513         vnic->mc_addr_cnt = i;
2514         if (vnic->mc_addr_cnt)
2515                 vnic->flags |= BNXT_VNIC_INFO_MCAST;
2516         else
2517                 vnic->flags &= ~BNXT_VNIC_INFO_MCAST;
2518
2519 allmulti:
2520         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2521 }
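/*
 * Usage sketch (illustrative only): passing more than BNXT_MAX_MC_ADDRS
 * addresses silently falls back to all-multicast mode. port_id is a
 * placeholder.
 *
 *	struct rte_ether_addr mc[2] = {
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x02 } },
 *	};
 *
 *	rte_eth_dev_set_mc_addr_list(port_id, mc, 2);
 *	rte_eth_dev_set_mc_addr_list(port_id, NULL, 0);  // clear the list
 */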
2522
2523 static int
2524 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2525 {
2526         struct bnxt *bp = dev->data->dev_private;
2527         uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
2528         uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
2529         uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
2530         uint8_t fw_rsvd = bp->fw_ver & 0xff;
2531         int ret;
2532
2533         ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d",
2534                         fw_major, fw_minor, fw_updt, fw_rsvd);
2535
2536         ret += 1; /* add the size of '\0' */
2537         if (fw_size < (uint32_t)ret)
2538                 return ret;
2539         else
2540                 return 0;
2541 }
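/*
 * Usage sketch (illustrative only): when the supplied buffer is too small,
 * the op returns the number of bytes (including '\0') that would have been
 * needed, so the call can be retried with a larger buffer. port_id is a
 * placeholder.
 *
 *	char fw[32];
 *
 *	if (rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw)) == 0)
 *		printf("firmware %s\n", fw);
 */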
2542
2543 static void
2544 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
2545         struct rte_eth_rxq_info *qinfo)
2546 {
2547         struct bnxt *bp = dev->data->dev_private;
2548         struct bnxt_rx_queue *rxq;
2549
2550         if (is_bnxt_in_error(bp))
2551                 return;
2552
2553         rxq = dev->data->rx_queues[queue_id];
2554
2555         qinfo->mp = rxq->mb_pool;
2556         qinfo->scattered_rx = dev->data->scattered_rx;
2557         qinfo->nb_desc = rxq->nb_rx_desc;
2558
2559         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2560         qinfo->conf.rx_drop_en = 0;
2561         qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
2562 }
2563
2564 static void
2565 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
2566         struct rte_eth_txq_info *qinfo)
2567 {
2568         struct bnxt *bp = dev->data->dev_private;
2569         struct bnxt_tx_queue *txq;
2570
2571         if (is_bnxt_in_error(bp))
2572                 return;
2573
2574         txq = dev->data->tx_queues[queue_id];
2575
2576         qinfo->nb_desc = txq->nb_tx_desc;
2577
2578         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2579         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2580         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2581
2582         qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
2583         qinfo->conf.tx_rs_thresh = 0;
2584         qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
2585 }
2586
2587 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
2588 {
2589         struct bnxt *bp = eth_dev->data->dev_private;
2590         uint32_t new_pkt_size;
2591         int rc = 0;
2592         uint32_t i;
2593
2594         rc = is_bnxt_in_error(bp);
2595         if (rc)
2596                 return rc;
2597
2598         /* Exit if receive queues are not configured yet */
2599         if (!eth_dev->data->nb_rx_queues)
2600                 return rc;
2601
2602         new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
2603                        VLAN_TAG_SIZE * BNXT_NUM_VLANS;
2604
2605 #ifdef RTE_ARCH_X86
2606         /*
2607          * If vector-mode tx/rx is active, disallow any MTU change that would
2608          * require scattered receive support.
2609          */
2610         if (eth_dev->data->dev_started &&
2611             (eth_dev->rx_pkt_burst == bnxt_recv_pkts_vec ||
2612              eth_dev->tx_pkt_burst == bnxt_xmit_pkts_vec) &&
2613             (new_pkt_size >
2614              eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
2615                 PMD_DRV_LOG(ERR,
2616                             "MTU change would require scattered rx support. ");
2617                 PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n");
2618                 return -EINVAL;
2619         }
2620 #endif
2621
2622         if (new_mtu > RTE_ETHER_MTU) {
2623                 bp->flags |= BNXT_FLAG_JUMBO;
2624                 bp->eth_dev->data->dev_conf.rxmode.offloads |=
2625                         DEV_RX_OFFLOAD_JUMBO_FRAME;
2626         } else {
2627                 bp->eth_dev->data->dev_conf.rxmode.offloads &=
2628                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
2629                 bp->flags &= ~BNXT_FLAG_JUMBO;
2630         }
2631
2632         /* Is there a change in mtu setting? */
2633         if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len == new_pkt_size)
2634                 return rc;
2635
2636         for (i = 0; i < bp->nr_vnics; i++) {
2637                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2638                 uint16_t size = 0;
2639
2640                 vnic->mru = BNXT_VNIC_MRU(new_mtu);
2641                 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
2642                 if (rc)
2643                         break;
2644
2645                 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
2646                 size -= RTE_PKTMBUF_HEADROOM;
2647
2648                 if (size < new_mtu) {
2649                         rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
2650                         if (rc)
2651                                 return rc;
2652                 }
2653         }
2654
2655         if (!rc)
2656                 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size;
2657
2658         PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu);
2659
2660         return rc;
2661 }
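/*
 * Worked example (illustrative only), assuming RTE_ETHER_HDR_LEN = 14,
 * RTE_ETHER_CRC_LEN = 4, VLAN_TAG_SIZE = 4 and BNXT_NUM_VLANS = 2:
 * a request of new_mtu = 9000 gives
 * new_pkt_size = 9000 + 14 + 4 + 4 * 2 = 9026, and also turns on
 * DEV_RX_OFFLOAD_JUMBO_FRAME since 9000 > RTE_ETHER_MTU (1500).
 *
 *	rte_eth_dev_set_mtu(port_id, 9000);  // port_id is a placeholder
 */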
2662
2663 static int
2664 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
2665 {
2666         struct bnxt *bp = dev->data->dev_private;
2667         uint16_t vlan = bp->vlan;
2668         int rc;
2669
2670         rc = is_bnxt_in_error(bp);
2671         if (rc)
2672                 return rc;
2673
2674         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
2675                 PMD_DRV_LOG(ERR,
2676                         "PVID cannot be modified for this function\n");
2677                 return -ENOTSUP;
2678         }
2679         bp->vlan = on ? pvid : 0;
2680
2681         rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
2682         if (rc)
2683                 bp->vlan = vlan;
2684         return rc;
2685 }
2686
2687 static int
2688 bnxt_dev_led_on_op(struct rte_eth_dev *dev)
2689 {
2690         struct bnxt *bp = dev->data->dev_private;
2691         int rc;
2692
2693         rc = is_bnxt_in_error(bp);
2694         if (rc)
2695                 return rc;
2696
2697         return bnxt_hwrm_port_led_cfg(bp, true);
2698 }
2699
2700 static int
2701 bnxt_dev_led_off_op(struct rte_eth_dev *dev)
2702 {
2703         struct bnxt *bp = dev->data->dev_private;
2704         int rc;
2705
2706         rc = is_bnxt_in_error(bp);
2707         if (rc)
2708                 return rc;
2709
2710         return bnxt_hwrm_port_led_cfg(bp, false);
2711 }
2712
2713 static uint32_t
2714 bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2715 {
2716         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2717         uint32_t desc = 0, raw_cons = 0, cons;
2718         struct bnxt_cp_ring_info *cpr;
2719         struct bnxt_rx_queue *rxq;
2720         struct rx_pkt_cmpl *rxcmp;
2721         int rc;
2722
2723         rc = is_bnxt_in_error(bp);
2724         if (rc)
2725                 return rc;
2726
2727         rxq = dev->data->rx_queues[rx_queue_id];
2728         cpr = rxq->cp_ring;
2729         raw_cons = cpr->cp_raw_cons;
2730
2731         while (1) {
2732                 cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
2733                 rte_prefetch0(&cpr->cp_desc_ring[cons]);
2734                 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2735
2736                 if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct)) {
2737                         break;
2738                 } else {
2739                         raw_cons++;
2740                         desc++;
2741                 }
2742         }
2743
2744         return desc;
2745 }
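/*
 * Usage sketch (illustrative only): counts completions that are valid but
 * not yet retrieved on an Rx queue; port_id is a placeholder.
 *
 *	int used = rte_eth_rx_queue_count(port_id, 0);
 */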
2746
2747 static int
2748 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
2749 {
2750         struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
2751         struct bnxt_rx_ring_info *rxr;
2752         struct bnxt_cp_ring_info *cpr;
2753         struct bnxt_sw_rx_bd *rx_buf;
2754         struct rx_pkt_cmpl *rxcmp;
2755         uint32_t cons, cp_cons;
2756         int rc;
2757
2758         if (!rxq)
2759                 return -EINVAL;
2760
2761         rc = is_bnxt_in_error(rxq->bp);
2762         if (rc)
2763                 return rc;
2764
2765         cpr = rxq->cp_ring;
2766         rxr = rxq->rx_ring;
2767
2768         if (offset >= rxq->nb_rx_desc)
2769                 return -EINVAL;
2770
2771         cons = RING_CMP(cpr->cp_ring_struct, offset);
2772         cp_cons = cpr->cp_raw_cons;
2773         rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2774
2775         if (cons > cp_cons) {
2776                 if (CMPL_VALID(rxcmp, cpr->valid))
2777                         return RTE_ETH_RX_DESC_DONE;
2778         } else {
2779                 if (CMPL_VALID(rxcmp, !cpr->valid))
2780                         return RTE_ETH_RX_DESC_DONE;
2781         }
2782         rx_buf = &rxr->rx_buf_ring[cons];
2783         if (rx_buf->mbuf == NULL)
2784                 return RTE_ETH_RX_DESC_UNAVAIL;
2785
2786
2787         return RTE_ETH_RX_DESC_AVAIL;
2788 }
2789
2790 static int
2791 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
2792 {
2793         struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
2794         struct bnxt_tx_ring_info *txr;
2795         struct bnxt_cp_ring_info *cpr;
2796         struct bnxt_sw_tx_bd *tx_buf;
2797         struct tx_pkt_cmpl *txcmp;
2798         uint32_t cons, cp_cons;
2799         int rc;
2800
2801         if (!txq)
2802                 return -EINVAL;
2803
2804         rc = is_bnxt_in_error(txq->bp);
2805         if (rc)
2806                 return rc;
2807
2808         cpr = txq->cp_ring;
2809         txr = txq->tx_ring;
2810
2811         if (offset >= txq->nb_tx_desc)
2812                 return -EINVAL;
2813
2814         cons = RING_CMP(cpr->cp_ring_struct, offset);
2815         txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
2816         cp_cons = cpr->cp_raw_cons;
2817
2818         if (cons > cp_cons) {
2819                 if (CMPL_VALID(txcmp, cpr->valid))
2820                         return RTE_ETH_TX_DESC_UNAVAIL;
2821         } else {
2822                 if (CMPL_VALID(txcmp, !cpr->valid))
2823                         return RTE_ETH_TX_DESC_UNAVAIL;
2824         }
2825         tx_buf = &txr->tx_buf_ring[cons];
2826         if (tx_buf->mbuf == NULL)
2827                 return RTE_ETH_TX_DESC_DONE;
2828
2829         return RTE_ETH_TX_DESC_FULL;
2830 }
2831
2832 static struct bnxt_filter_info *
2833 bnxt_match_and_validate_ether_filter(struct bnxt *bp,
2834                                 struct rte_eth_ethertype_filter *efilter,
2835                                 struct bnxt_vnic_info *vnic0,
2836                                 struct bnxt_vnic_info *vnic,
2837                                 int *ret)
2838 {
2839         struct bnxt_filter_info *mfilter = NULL;
2840         int match = 0;
2841         *ret = 0;
2842
2843         if (efilter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2844                 efilter->ether_type == RTE_ETHER_TYPE_IPV6) {
2845                 PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in"
2846                         " ethertype filter.", efilter->ether_type);
2847                 *ret = -EINVAL;
2848                 goto exit;
2849         }
2850         if (efilter->queue >= bp->rx_nr_rings) {
2851                 PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
2852                 *ret = -EINVAL;
2853                 goto exit;
2854         }
2855
2856         vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
2857         vnic = &bp->vnic_info[efilter->queue];
2858         if (vnic == NULL) {
2859                 PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
2860                 *ret = -EINVAL;
2861                 goto exit;
2862         }
2863
2864         if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
2865                 STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
2866                         if ((!memcmp(efilter->mac_addr.addr_bytes,
2867                                      mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&
2868                              mfilter->flags ==
2869                              HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
2870                              mfilter->ethertype == efilter->ether_type)) {
2871                                 match = 1;
2872                                 break;
2873                         }
2874                 }
2875         } else {
2876                 STAILQ_FOREACH(mfilter, &vnic->filter, next)
2877                         if ((!memcmp(efilter->mac_addr.addr_bytes,
2878                                      mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&
2879                              mfilter->ethertype == efilter->ether_type &&
2880                              mfilter->flags ==
2881                              HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
2882                                 match = 1;
2883                                 break;
2884                         }
2885         }
2886
2887         if (match)
2888                 *ret = -EEXIST;
2889
2890 exit:
2891         return mfilter;
2892 }
2893
2894 static int
2895 bnxt_ethertype_filter(struct rte_eth_dev *dev,
2896                         enum rte_filter_op filter_op,
2897                         void *arg)
2898 {
2899         struct bnxt *bp = dev->data->dev_private;
2900         struct rte_eth_ethertype_filter *efilter =
2901                         (struct rte_eth_ethertype_filter *)arg;
2902         struct bnxt_filter_info *bfilter, *filter1;
2903         struct bnxt_vnic_info *vnic, *vnic0;
2904         int ret;
2905
2906         if (filter_op == RTE_ETH_FILTER_NOP)
2907                 return 0;
2908
2909         if (arg == NULL) {
2910                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
2911                             filter_op);
2912                 return -EINVAL;
2913         }
2914
2915         vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
2916         vnic = &bp->vnic_info[efilter->queue];
2917
2918         switch (filter_op) {
2919         case RTE_ETH_FILTER_ADD:
2920                 bnxt_match_and_validate_ether_filter(bp, efilter,
2921                                                         vnic0, vnic, &ret);
2922                 if (ret < 0)
2923                         return ret;
2924
2925                 bfilter = bnxt_get_unused_filter(bp);
2926                 if (bfilter == NULL) {
2927                         PMD_DRV_LOG(ERR,
2928                                 "Not enough resources for a new filter.\n");
2929                         return -ENOMEM;
2930                 }
2931                 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
2932                 memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
2933                        RTE_ETHER_ADDR_LEN);
2934                 memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,
2935                        RTE_ETHER_ADDR_LEN);
2936                 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
2937                 bfilter->ethertype = efilter->ether_type;
2938                 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2939
2940                 filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0);
2941                 if (filter1 == NULL) {
2942                         ret = -EINVAL;
2943                         goto cleanup;
2944                 }
2945                 bfilter->enables |=
2946                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
2947                 bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
2948
2949                 bfilter->dst_id = vnic->fw_vnic_id;
2950
2951                 if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
2952                         bfilter->flags =
2953                                 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
2954                 }
2955
2956                 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
2957                 if (ret)
2958                         goto cleanup;
2959                 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
2960                 break;
2961         case RTE_ETH_FILTER_DELETE:
2962                 filter1 = bnxt_match_and_validate_ether_filter(bp, efilter,
2963                                                         vnic0, vnic, &ret);
2964                 if (ret == -EEXIST) {
2965                         ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1);
2966
2967                         STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info,
2968                                       next);
2969                         bnxt_free_filter(bp, filter1);
2970                 } else if (ret == 0) {
2971                         PMD_DRV_LOG(ERR, "No matching filter found\n");
2972                 }
2973                 break;
2974         default:
2975                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
2976                 ret = -EINVAL;
2977                 goto error;
2978         }
2979         return ret;
2980 cleanup:
2981         bnxt_free_filter(bp, bfilter);
2982 error:
2983         return ret;
2984 }
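
/*
 * Editor's note (illustrative sketch, not upstream driver code): an
 * ethertype filter the ADD path above accepts. IPv4/IPv6 ethertypes are
 * rejected by the validator and must use the ntuple/fdir paths instead.
 */
static __rte_unused void
bnxt_example_ethertype_filter(struct rte_eth_ethertype_filter *efilter,
                              uint16_t queue)
{
        memset(efilter, 0, sizeof(*efilter));
        efilter->ether_type = RTE_ETHER_TYPE_ARP;       /* 0x0806 */
        efilter->flags = 0;     /* or RTE_ETHTYPE_FLAGS_DROP to drop instead */
        efilter->queue = queue;
        /* efilter->mac_addr feeds the L2 address match set up above */
}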
2985
2986 static inline int
2987 parse_ntuple_filter(struct bnxt *bp,
2988                     struct rte_eth_ntuple_filter *nfilter,
2989                     struct bnxt_filter_info *bfilter)
2990 {
2991         uint32_t en = 0;
2992
2993         if (nfilter->queue >= bp->rx_nr_rings) {
2994                 PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue);
2995                 return -EINVAL;
2996         }
2997
2998         switch (nfilter->dst_port_mask) {
2999         case UINT16_MAX:
3000                 bfilter->dst_port_mask = -1;
3001                 bfilter->dst_port = nfilter->dst_port;
3002                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT |
3003                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3004                 break;
3005         default:
3006                 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
3007                 return -EINVAL;
3008         }
3009
3010         bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
3012
3013         switch (nfilter->proto_mask) {
3014         case UINT8_MAX:
3015                 if (nfilter->proto == 17) /* IPPROTO_UDP */
3016                         bfilter->ip_protocol = 17;
3017                 else if (nfilter->proto == 6) /* IPPROTO_TCP */
3018                         bfilter->ip_protocol = 6;
3019                 else
3020                         return -EINVAL;
3021                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3022                 break;
3023         default:
3024                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
3025                 return -EINVAL;
3026         }
3027
3028         switch (nfilter->dst_ip_mask) {
3029         case UINT32_MAX:
3030                 bfilter->dst_ipaddr_mask[0] = -1;
3031                 bfilter->dst_ipaddr[0] = nfilter->dst_ip;
3032                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR |
3033                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3034                 break;
3035         default:
3036                 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
3037                 return -EINVAL;
3038         }
3039
3040         switch (nfilter->src_ip_mask) {
3041         case UINT32_MAX:
3042                 bfilter->src_ipaddr_mask[0] = -1;
3043                 bfilter->src_ipaddr[0] = nfilter->src_ip;
3044                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
3045                         NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3046                 break;
3047         default:
3048                 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
3049                 return -EINVAL;
3050         }
3051
3052         switch (nfilter->src_port_mask) {
3053         case UINT16_MAX:
3054                 bfilter->src_port_mask = -1;
3055                 bfilter->src_port = nfilter->src_port;
3056                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
3057                         NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3058                 break;
3059         default:
3060                 PMD_DRV_LOG(ERR, "invalid src_port mask.");
3061                 return -EINVAL;
3062         }
3063
3064         bfilter->enables = en;
3065         return 0;
3066 }
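
/*
 * Editor's note (illustrative sketch, not upstream driver code): the parser
 * above accepts exact-match 5-tuples only, so every *_mask must be the
 * all-ones value. Addresses and ports are placeholders, given in big-endian
 * byte order as rte_eth_ntuple_filter documents.
 */
static __rte_unused void
bnxt_example_ntuple_filter(struct rte_eth_ntuple_filter *nf, uint16_t queue)
{
        memset(nf, 0, sizeof(*nf));
        nf->flags = RTE_5TUPLE_FLAGS;
        nf->proto = 6;                          /* IPPROTO_TCP */
        nf->proto_mask = UINT8_MAX;
        nf->dst_ip = rte_cpu_to_be_32(0xc0a80001);      /* 192.168.0.1 */
        nf->dst_ip_mask = UINT32_MAX;
        nf->src_ip = rte_cpu_to_be_32(0xc0a80002);      /* 192.168.0.2 */
        nf->src_ip_mask = UINT32_MAX;
        nf->dst_port = rte_cpu_to_be_16(80);
        nf->dst_port_mask = UINT16_MAX;
        nf->src_port = rte_cpu_to_be_16(1024);
        nf->src_port_mask = UINT16_MAX;
        nf->queue = queue;
}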
3067
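/*
 * Scan every vnic's filter list (newest vnic first) for an entry whose
 * 5-tuple, flags and enables all match 'bfilter'; optionally report the
 * owning vnic through 'mvnic'.
 */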
3068 static struct bnxt_filter_info*
3069 bnxt_match_ntuple_filter(struct bnxt *bp,
3070                          struct bnxt_filter_info *bfilter,
3071                          struct bnxt_vnic_info **mvnic)
3072 {
3073         struct bnxt_filter_info *mfilter = NULL;
3074         int i;
3075
3076         for (i = bp->nr_vnics - 1; i >= 0; i--) {
3077                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3078                 STAILQ_FOREACH(mfilter, &vnic->filter, next) {
3079                         if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
3080                             bfilter->src_ipaddr_mask[0] ==
3081                             mfilter->src_ipaddr_mask[0] &&
3082                             bfilter->src_port == mfilter->src_port &&
3083                             bfilter->src_port_mask == mfilter->src_port_mask &&
3084                             bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
3085                             bfilter->dst_ipaddr_mask[0] ==
3086                             mfilter->dst_ipaddr_mask[0] &&
3087                             bfilter->dst_port == mfilter->dst_port &&
3088                             bfilter->dst_port_mask == mfilter->dst_port_mask &&
3089                             bfilter->flags == mfilter->flags &&
3090                             bfilter->enables == mfilter->enables) {
3091                                 if (mvnic)
3092                                         *mvnic = vnic;
3093                                 return mfilter;
3094                         }
3095                 }
3096         }
3097         return NULL;
3098 }
3099
3100 static int
3101 bnxt_cfg_ntuple_filter(struct bnxt *bp,
3102                        struct rte_eth_ntuple_filter *nfilter,
3103                        enum rte_filter_op filter_op)
3104 {
3105         struct bnxt_filter_info *bfilter, *mfilter, *filter1;
3106         struct bnxt_vnic_info *vnic, *vnic0, *mvnic;
3107         int ret;
3108
3109         if (nfilter->flags != RTE_5TUPLE_FLAGS) {
3110                 PMD_DRV_LOG(ERR, "Only 5-tuple filters are supported.\n");
3111                 return -EINVAL;
3112         }
3113
3114         if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
3115                 PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n");
3116                 return -EINVAL;
3117         }
3118
3119         bfilter = bnxt_get_unused_filter(bp);
3120         if (bfilter == NULL) {
3121                 PMD_DRV_LOG(ERR,
3122                         "Not enough resources for a new filter.\n");
3123                 return -ENOMEM;
3124         }
3125         ret = parse_ntuple_filter(bp, nfilter, bfilter);
3126         if (ret < 0)
3127                 goto free_filter;
3128
3129         vnic = &bp->vnic_info[nfilter->queue];
3130         vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
3131         filter1 = STAILQ_FIRST(&vnic0->filter);
3132         if (filter1 == NULL) {
3133                 ret = -EINVAL;
3134                 goto free_filter;
3135         }
3136
3137         bfilter->dst_id = vnic->fw_vnic_id;
3138         bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
3139         bfilter->enables |=
3140                 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
3141         bfilter->ethertype = RTE_ETHER_TYPE_IPV4;
3142         bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3143
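        /*
         * Duplicate handling: an identical pattern aimed at the same queue
         * fails with -EEXIST, while an identical pattern aimed at another
         * queue retargets the existing filter instead of allocating a new one.
         */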
3144         mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic);
3145
3146         if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
3147             bfilter->dst_id == mfilter->dst_id) {
3148                 PMD_DRV_LOG(ERR, "filter exists.\n");
3149                 ret = -EEXIST;
3150                 goto free_filter;
3151         } else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
3152                    bfilter->dst_id != mfilter->dst_id) {
3153                 mfilter->dst_id = vnic->fw_vnic_id;
3154                 ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter);
3155                 STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next);
3156                 STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next);
3157                 PMD_DRV_LOG(ERR, "Filter with matching pattern exists.\n");
3158                 PMD_DRV_LOG(ERR, "Updated it to the new destination queue.\n");
3159                 goto free_filter;
3160         }
3161         if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
3162                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3163                 ret = -ENOENT;
3164                 goto free_filter;
3165         }
3166
3167         if (filter_op == RTE_ETH_FILTER_ADD) {
3168                 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
3169                 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
3170                 if (ret)
3171                         goto free_filter;
3172                 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
3173         } else {
3174                 if (mfilter == NULL) {
3175                         /* Should not happen; check kept for static analysis. */
3176                         ret = -ENOENT;
3177                         goto free_filter;
3178                 }
3179                 ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter);
3180
3181                 STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next);
3182                 bnxt_free_filter(bp, mfilter);
3183                 bnxt_free_filter(bp, bfilter);
3184         }
3185
3186         return 0;
3187 free_filter:
3188         bnxt_free_filter(bp, bfilter);
3189         return ret;
3190 }
3191
3192 static int
3193 bnxt_ntuple_filter(struct rte_eth_dev *dev,
3194                         enum rte_filter_op filter_op,
3195                         void *arg)
3196 {
3197         struct bnxt *bp = dev->data->dev_private;
3198         int ret;
3199
3200         if (filter_op == RTE_ETH_FILTER_NOP)
3201                 return 0;
3202
3203         if (arg == NULL) {
3204                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
3205                             filter_op);
3206                 return -EINVAL;
3207         }
3208
3209         switch (filter_op) {
3210         case RTE_ETH_FILTER_ADD:
3211                 ret = bnxt_cfg_ntuple_filter(bp,
3212                         (struct rte_eth_ntuple_filter *)arg,
3213                         filter_op);
3214                 break;
3215         case RTE_ETH_FILTER_DELETE:
3216                 ret = bnxt_cfg_ntuple_filter(bp,
3217                         (struct rte_eth_ntuple_filter *)arg,
3218                         filter_op);
3219                 break;
3220         default:
3221                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
3222                 ret = -EINVAL;
3223                 break;
3224         }
3225         return ret;
3226 }
3227
3228 static int
3229 bnxt_parse_fdir_filter(struct bnxt *bp,
3230                        struct rte_eth_fdir_filter *fdir,
3231                        struct bnxt_filter_info *filter)
3232 {
3233         enum rte_fdir_mode fdir_mode =
3234                 bp->eth_dev->data->dev_conf.fdir_conf.mode;
3235         struct bnxt_vnic_info *vnic0, *vnic;
3236         struct bnxt_filter_info *filter1;
3237         uint32_t en = 0;
3238         int i;
3239
3240         if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3241                 return -EINVAL;
3242
3243         filter->l2_ovlan = fdir->input.flow_ext.vlan_tci;
3244         en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
3245
3246         switch (fdir->input.flow_type) {
3247         case RTE_ETH_FLOW_IPV4:
3248         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
3249                 /* FALLTHROUGH */
3250                 filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip;
3251                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3252                 filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip;
3253                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3254                 filter->ip_protocol = fdir->input.flow.ip4_flow.proto;
3255                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3256                 filter->ip_addr_type =
3257                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
3258                 filter->src_ipaddr_mask[0] = 0xffffffff;
3259                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3260                 filter->dst_ipaddr_mask[0] = 0xffffffff;
3261                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3262                 filter->ethertype = RTE_ETHER_TYPE_IPV4;
3263                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3264                 break;
3265         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
3266                 filter->src_port = fdir->input.flow.tcp4_flow.src_port;
3267                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
3268                 filter->dst_port = fdir->input.flow.tcp4_flow.dst_port;
3269                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
3270                 filter->dst_port_mask = 0xffff;
3271                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3272                 filter->src_port_mask = 0xffff;
3273                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3274                 filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip;
3275                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3276                 filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip;
3277                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3278                 filter->ip_protocol = 6;
3279                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3280                 filter->ip_addr_type =
3281                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
3282                 filter->src_ipaddr_mask[0] = 0xffffffff;
3283                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3284                 filter->dst_ipaddr_mask[0] = 0xffffffff;
3285                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3286                 filter->ethertype = RTE_ETHER_TYPE_IPV4;
3287                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3288                 break;
3289         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
3290                 filter->src_port = fdir->input.flow.udp4_flow.src_port;
3291                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
3292                 filter->dst_port = fdir->input.flow.udp4_flow.dst_port;
3293                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
3294                 filter->dst_port_mask = 0xffff;
3295                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3296                 filter->src_port_mask = 0xffff;
3297                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3298                 filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip;
3299                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3300                 filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip;
3301                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3302                 filter->ip_protocol = 17;
3303                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3304                 filter->ip_addr_type =
3305                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
3306                 filter->src_ipaddr_mask[0] = 0xffffffff;
3307                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3308                 filter->dst_ipaddr_mask[0] = 0xffffffff;
3309                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3310                 filter->ethertype = RTE_ETHER_TYPE_IPV4;
3311                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3312                 break;
3313         case RTE_ETH_FLOW_IPV6:
3314         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
3315                 /* FALLTHROUGH */
3316                 filter->ip_addr_type =
3317                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
3318                 filter->ip_protocol = fdir->input.flow.ipv6_flow.proto;
3319                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3320                 rte_memcpy(filter->src_ipaddr,
3321                            fdir->input.flow.ipv6_flow.src_ip, 16);
3322                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3323                 rte_memcpy(filter->dst_ipaddr,
3324                            fdir->input.flow.ipv6_flow.dst_ip, 16);
3325                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3326                 memset(filter->dst_ipaddr_mask, 0xff, 16);
3327                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3328                 memset(filter->src_ipaddr_mask, 0xff, 16);
3329                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3330                 filter->ethertype = RTE_ETHER_TYPE_IPV6;
3331                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3332                 break;
3333         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
3334                 filter->src_port = fdir->input.flow.tcp6_flow.src_port;
3335                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
3336                 filter->dst_port = fdir->input.flow.tcp6_flow.dst_port;
3337                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
3338                 filter->dst_port_mask = 0xffff;
3339                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3340                 filter->src_port_mask = 0xffff;
3341                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3342                 filter->ip_addr_type =
3343                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
3344                 filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto;
3345                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3346                 rte_memcpy(filter->src_ipaddr,
3347                            fdir->input.flow.tcp6_flow.ip.src_ip, 16);
3348                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3349                 rte_memcpy(filter->dst_ipaddr,
3350                            fdir->input.flow.tcp6_flow.ip.dst_ip, 16);
3351                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3352                 memset(filter->dst_ipaddr_mask, 0xff, 16);
3353                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3354                 memset(filter->src_ipaddr_mask, 0xff, 16);
3355                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3356                 filter->ethertype = RTE_ETHER_TYPE_IPV6;
3357                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3358                 break;
3359         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
3360                 filter->src_port = fdir->input.flow.udp6_flow.src_port;
3361                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
3362                 filter->dst_port = fdir->input.flow.udp6_flow.dst_port;
3363                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
3364                 filter->dst_port_mask = 0xffff;
3365                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
3366                 filter->src_port_mask = 0xffff;
3367                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
3368                 filter->ip_addr_type =
3369                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
3370                 filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto;
3371                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
3372                 rte_memcpy(filter->src_ipaddr,
3373                            fdir->input.flow.udp6_flow.ip.src_ip, 16);
3374                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
3375                 rte_memcpy(filter->dst_ipaddr,
3376                            fdir->input.flow.udp6_flow.ip.dst_ip, 16);
3377                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
3378                 memset(filter->dst_ipaddr_mask, 0xff, 16);
3379                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
3380                 memset(filter->src_ipaddr_mask, 0xff, 16);
3381                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
3382                 filter->ethertype = RTE_ETHER_TYPE_IPV6;
3383                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3384                 break;
3385         case RTE_ETH_FLOW_L2_PAYLOAD:
3386                 filter->ethertype = fdir->input.flow.l2_flow.ether_type;
3387                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
3388                 break;
3389         case RTE_ETH_FLOW_VXLAN:
3390                 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
3391                         return -EINVAL;
3392                 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
3393                 filter->tunnel_type =
3394                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
3395                 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
3396                 break;
3397         case RTE_ETH_FLOW_NVGRE:
3398                 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
3399                         return -EINVAL;
3400                 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
3401                 filter->tunnel_type =
3402                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
3403                 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
3404                 break;
3405         case RTE_ETH_FLOW_UNKNOWN:
3406         case RTE_ETH_FLOW_RAW:
3407         case RTE_ETH_FLOW_FRAG_IPV4:
3408         case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
3409         case RTE_ETH_FLOW_FRAG_IPV6:
3410         case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
3411         case RTE_ETH_FLOW_IPV6_EX:
3412         case RTE_ETH_FLOW_IPV6_TCP_EX:
3413         case RTE_ETH_FLOW_IPV6_UDP_EX:
3414         case RTE_ETH_FLOW_GENEVE:
3415                 /* FALLTHROUGH */
3416         default:
3417                 return -EINVAL;
3418         }
3419
3420         vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
3421         if (fdir->action.rx_queue >= bp->rx_nr_rings) {
3422                 PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue);
3423                 return -EINVAL;
3424         }
3425         vnic = &bp->vnic_info[fdir->action.rx_queue];
3426
3427         if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
3428                 rte_memcpy(filter->dst_macaddr,
3429                         fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6);
3430                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
3431         }
3432
3433         if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) {
3434                 filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
3435                 filter1 = STAILQ_FIRST(&vnic0->filter);
3437         } else {
3438                 filter->dst_id = vnic->fw_vnic_id;
3439                 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
3440                         if (filter->dst_macaddr[i] != 0x00)
3441                                 break;
3442                 filter1 = i == RTE_ETHER_ADDR_LEN ? STAILQ_FIRST(&vnic0->filter)
3443                                 : bnxt_get_l2_filter(bp, filter, vnic);
3444         }
3445
3446         if (filter1 == NULL)
3447                 return -EINVAL;
3448
3449         en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
3450         filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
3451
3452         filter->enables = en;
3453
3454         return 0;
3455 }
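
/*
 * Editor's note (illustrative sketch, not upstream driver code): a flow
 * director entry the parser above maps onto an exact-match ntuple filter:
 * IPv4/UDP with full address and port masks, steered to 'queue'. The
 * addresses and ports are placeholders in big-endian byte order.
 */
static __rte_unused void
bnxt_example_fdir_filter(struct rte_eth_fdir_filter *fdir, uint16_t queue)
{
        memset(fdir, 0, sizeof(*fdir));
        fdir->input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
        fdir->input.flow.udp4_flow.ip.src_ip = rte_cpu_to_be_32(0xc0a80002);
        fdir->input.flow.udp4_flow.ip.dst_ip = rte_cpu_to_be_32(0xc0a80001);
        fdir->input.flow.udp4_flow.src_port = rte_cpu_to_be_16(4789);
        fdir->input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(4789);
        fdir->action.behavior = RTE_ETH_FDIR_ACCEPT;
        fdir->action.rx_queue = queue;
}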
3456
3457 static struct bnxt_filter_info *
3458 bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf,
3459                 struct bnxt_vnic_info **mvnic)
3460 {
3461         struct bnxt_filter_info *mf = NULL;
3462         int i;
3463
3464         for (i = bp->nr_vnics - 1; i >= 0; i--) {
3465                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3466
3467                 STAILQ_FOREACH(mf, &vnic->filter, next) {
3468                         if (mf->filter_type == nf->filter_type &&
3469                             mf->flags == nf->flags &&
3470                             mf->src_port == nf->src_port &&
3471                             mf->src_port_mask == nf->src_port_mask &&
3472                             mf->dst_port == nf->dst_port &&
3473                             mf->dst_port_mask == nf->dst_port_mask &&
3474                             mf->ip_protocol == nf->ip_protocol &&
3475                             mf->ip_addr_type == nf->ip_addr_type &&
3476                             mf->ethertype == nf->ethertype &&
3477                             mf->vni == nf->vni &&
3478                             mf->tunnel_type == nf->tunnel_type &&
3479                             mf->l2_ovlan == nf->l2_ovlan &&
3480                             mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
3481                             mf->l2_ivlan == nf->l2_ivlan &&
3482                             mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
3483                             !memcmp(mf->l2_addr, nf->l2_addr,
3484                                     RTE_ETHER_ADDR_LEN) &&
3485                             !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
3486                                     RTE_ETHER_ADDR_LEN) &&
3487                             !memcmp(mf->src_macaddr, nf->src_macaddr,
3488                                     RTE_ETHER_ADDR_LEN) &&
3489                             !memcmp(mf->dst_macaddr, nf->dst_macaddr,
3490                                     RTE_ETHER_ADDR_LEN) &&
3491                             !memcmp(mf->src_ipaddr, nf->src_ipaddr,
3492                                     sizeof(nf->src_ipaddr)) &&
3493                             !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
3494                                     sizeof(nf->src_ipaddr_mask)) &&
3495                             !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
3496                                     sizeof(nf->dst_ipaddr)) &&
3497                             !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
3498                                     sizeof(nf->dst_ipaddr_mask))) {
3499                                 if (mvnic)
3500                                         *mvnic = vnic;
3501                                 return mf;
3502                         }
3503                 }
3504         }
3505         return NULL;
3506 }
3507
3508 static int
3509 bnxt_fdir_filter(struct rte_eth_dev *dev,
3510                  enum rte_filter_op filter_op,
3511                  void *arg)
3512 {
3513         struct bnxt *bp = dev->data->dev_private;
3514         struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg;
3515         struct bnxt_filter_info *filter, *match;
3516         struct bnxt_vnic_info *vnic, *mvnic;
3517         int ret = 0, i;
3518
3519         if (filter_op == RTE_ETH_FILTER_NOP)
3520                 return 0;
3521
3522         if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
3523                 return -EINVAL;
3524
3525         switch (filter_op) {
3526         case RTE_ETH_FILTER_ADD:
3527         case RTE_ETH_FILTER_DELETE:
3528                 /* FALLTHROUGH */
3529                 filter = bnxt_get_unused_filter(bp);
3530                 if (filter == NULL) {
3531                         PMD_DRV_LOG(ERR,
3532                                 "Not enough resources for a new flow.\n");
3533                         return -ENOMEM;
3534                 }
3535
3536                 ret = bnxt_parse_fdir_filter(bp, fdir, filter);
3537                 if (ret != 0)
3538                         goto free_filter;
3539                 filter->filter_type = HWRM_CFA_NTUPLE_FILTER;
3540
3541                 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
3542                         vnic = &bp->vnic_info[0];
3543                 else
3544                         vnic = &bp->vnic_info[fdir->action.rx_queue];
3545
3546                 match = bnxt_match_fdir(bp, filter, &mvnic);
3547                 if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
3548                         if (match->dst_id == vnic->fw_vnic_id) {
3549                                 PMD_DRV_LOG(ERR, "Flow already exists.\n");
3550                                 ret = -EEXIST;
3551                                 goto free_filter;
3552                         } else {
3553                                 match->dst_id = vnic->fw_vnic_id;
3554                                 ret = bnxt_hwrm_set_ntuple_filter(bp,
3555                                                                   match->dst_id,
3556                                                                   match);
3557                                 STAILQ_REMOVE(&mvnic->filter, match,
3558                                               bnxt_filter_info, next);
3559                                 STAILQ_INSERT_TAIL(&vnic->filter, match, next);
3560                                 PMD_DRV_LOG(ERR,
3561                                         "Filter with matching pattern exists\n");
3562                                 PMD_DRV_LOG(ERR,
3563                                         "Updated it to the new destination queue\n");
3564                                 goto free_filter;
3565                         }
3566                 }
3567                 if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
3568                         PMD_DRV_LOG(ERR, "Flow does not exist.\n");
3569                         ret = -ENOENT;
3570                         goto free_filter;
3571                 }
3572
3573                 if (filter_op == RTE_ETH_FILTER_ADD) {
3574                         ret = bnxt_hwrm_set_ntuple_filter(bp,
3575                                                           filter->dst_id,
3576                                                           filter);
3577                         if (ret)
3578                                 goto free_filter;
3579                         STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
3580                 } else {
3581                         ret = bnxt_hwrm_clear_ntuple_filter(bp, match);
3582                         STAILQ_REMOVE(&vnic->filter, match,
3583                                       bnxt_filter_info, next);
3584                         bnxt_free_filter(bp, match);
3585                         bnxt_free_filter(bp, filter);
3586                 }
3587                 break;
3588         case RTE_ETH_FILTER_FLUSH:
3589                 for (i = bp->nr_vnics - 1; i >= 0; i--) {
3590                         struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3591
3592                         filter = STAILQ_FIRST(&vnic->filter);
3593                         while (filter != NULL) {
3594                                 struct bnxt_filter_info *nxt =
3595                                         STAILQ_NEXT(filter, next);
3596
3597                                 if (filter->filter_type ==
3598                                     HWRM_CFA_NTUPLE_FILTER) {
3599                                         ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
3600                                         STAILQ_REMOVE(&vnic->filter, filter,
3601                                                       bnxt_filter_info, next);
3602                                         bnxt_free_filter(bp, filter);
3603                                 }
3604                                 filter = nxt;
3605                         }
3602                 }
3603                 return ret;
3604         case RTE_ETH_FILTER_UPDATE:
3605         case RTE_ETH_FILTER_STATS:
3606         case RTE_ETH_FILTER_INFO:
3607                 PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op);
3608                 break;
3609         default:
3610                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
3611                 ret = -EINVAL;
3612                 break;
3613         }
3614         return ret;
3615
3616 free_filter:
3617         bnxt_free_filter(bp, filter);
3618         return ret;
3619 }
3620
3621 static int
3622 bnxt_filter_ctrl_op(struct rte_eth_dev *dev,
3623                     enum rte_filter_type filter_type,
3624                     enum rte_filter_op filter_op, void *arg)
3625 {
3626         struct bnxt *bp = dev->data->dev_private;
3627         int ret = 0;
3628
3629         ret = is_bnxt_in_error(bp);
3630         if (ret)
3631                 return ret;
3632
3633         switch (filter_type) {
3634         case RTE_ETH_FILTER_TUNNEL:
3635                 PMD_DRV_LOG(ERR,
3636                         "Filter type (%d) not implemented yet\n", filter_type);
3637                 break;
3638         case RTE_ETH_FILTER_FDIR:
3639                 ret = bnxt_fdir_filter(dev, filter_op, arg);
3640                 break;
3641         case RTE_ETH_FILTER_NTUPLE:
3642                 ret = bnxt_ntuple_filter(dev, filter_op, arg);
3643                 break;
3644         case RTE_ETH_FILTER_ETHERTYPE:
3645                 ret = bnxt_ethertype_filter(dev, filter_op, arg);
3646                 break;
3647         case RTE_ETH_FILTER_GENERIC:
3648                 if (filter_op != RTE_ETH_FILTER_GET)
3649                         return -EINVAL;
3650                 if (BNXT_TRUFLOW_EN(bp))
3651                         *(const void **)arg = &bnxt_ulp_rte_flow_ops;
3652                 else
3653                         *(const void **)arg = &bnxt_flow_ops;
3654                 break;
3655         default:
3656                 PMD_DRV_LOG(ERR,
3657                         "Filter type (%d) not supported", filter_type);
3658                 ret = -EINVAL;
3659                 break;
3660         }
3661         return ret;
3662 }
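
/*
 * Editor's note (illustrative sketch, not upstream driver code): applications
 * reach the dispatcher above through the legacy filtering API, assuming
 * rte_eth_dev_filter_ctrl() is still present in this DPDK release (it was
 * deprecated and later removed in favour of rte_flow).
 */
static __rte_unused int
bnxt_example_add_ntuple(uint16_t port_id, struct rte_eth_ntuple_filter *nf)
{
        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
                                       RTE_ETH_FILTER_ADD, nf);
}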
3663
3664 static const uint32_t *
3665 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
3666 {
3667         static const uint32_t ptypes[] = {
3668                 RTE_PTYPE_L2_ETHER_VLAN,
3669                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
3670                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
3671                 RTE_PTYPE_L4_ICMP,
3672                 RTE_PTYPE_L4_TCP,
3673                 RTE_PTYPE_L4_UDP,
3674                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
3675                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
3676                 RTE_PTYPE_INNER_L4_ICMP,
3677                 RTE_PTYPE_INNER_L4_TCP,
3678                 RTE_PTYPE_INNER_L4_UDP,
3679                 RTE_PTYPE_UNKNOWN
3680         };
3681
3682         if (!dev->rx_pkt_burst)
3683                 return NULL;
3684
3685         return ptypes;
3686 }
3687
3688 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
3689                          int reg_win)
3690 {
3691         uint32_t reg_base = *reg_arr & 0xfffff000;
3692         uint32_t win_off;
3693         int i;
3694
3695         for (i = 0; i < count; i++) {
3696                 if ((reg_arr[i] & 0xfffff000) != reg_base)
3697                         return -ERANGE;
3698         }
3699         win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
3700         rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off);
3701         return 0;
3702 }
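
/*
 * Editor's note (illustrative sketch, not upstream driver code): each GRC
 * window is a 4KB view. Once bnxt_map_regs() has programmed a window's base
 * register, a register in that region is reached in BAR0 at
 * window_nr * 0x1000 plus the low 12 bits of its GRC address (compare the
 * 0x5000/0x6000 bases used for the PTP windows below).
 */
static __rte_unused uint32_t
bnxt_example_grc_window_offset(int reg_win, uint32_t reg)
{
        return (uint32_t)reg_win * 0x1000 + (reg & 0xfff);
}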
3703
3704 static int bnxt_map_ptp_regs(struct bnxt *bp)
3705 {
3706         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3707         uint32_t *reg_arr;
3708         int rc, i;
3709
3710         reg_arr = ptp->rx_regs;
3711         rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5);
3712         if (rc)
3713                 return rc;
3714
3715         reg_arr = ptp->tx_regs;
3716         rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6);
3717         if (rc)
3718                 return rc;
3719
3720         for (i = 0; i < BNXT_PTP_RX_REGS; i++)
3721                 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff);
3722
3723         for (i = 0; i < BNXT_PTP_TX_REGS; i++)
3724                 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff);
3725
3726         return 0;
3727 }
3728
3729 static void bnxt_unmap_ptp_regs(struct bnxt *bp)
3730 {
3731         rte_write32(0, (uint8_t *)bp->bar0 +
3732                          BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16);
3733         rte_write32(0, (uint8_t *)bp->bar0 +
3734                          BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20);
3735 }
3736
3737 static uint64_t bnxt_cc_read(struct bnxt *bp)
3738 {
3739         uint64_t ns;
3740
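        /* Compose the 64-bit sync time from two little-endian 32-bit reads:
         * the low word at BNXT_GRCPF_REG_SYNC_TIME, the high word 4 bytes up.
         */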
3741         ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3742                               BNXT_GRCPF_REG_SYNC_TIME));
3743         ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3744                                           BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32;
3745         return ns;
3746 }
3747
3748 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts)
3749 {
3750         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3751         uint32_t fifo;
3752
3753         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3754                                 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
3755         if (fifo & BNXT_PTP_TX_FIFO_EMPTY)
3756                 return -EAGAIN;
3757
3758         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3759                                 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
3760         *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3761                                 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L]));
3762         *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3763                                 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32;
3764
3765         return 0;
3766 }
3767
3768 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts)
3769 {
3770         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3771         struct bnxt_pf_info *pf = bp->pf;
3772         uint16_t port_id;
3773         uint32_t fifo;
3774
3775         if (!ptp)
3776                 return -ENODEV;
3777
3778         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3779                                 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
3780         if (!(fifo & BNXT_PTP_RX_FIFO_PENDING))
3781                 return -EAGAIN;
3782
3783         port_id = pf->port_id;
3784         rte_write32(1 << port_id, (uint8_t *)bp->bar0 +
3785                ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]);
3786
3787         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3788                                    ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
3789         if (fifo & BNXT_PTP_RX_FIFO_PENDING) {
3790                 /* TBD: call bnxt_clr_rx_ts(bp) here to flush the FIFO */
3791                 return -EBUSY;
3792         }
3793
3794         *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3795                                 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]));
3796         *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
3797                                 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32;
3798
3799         return 0;
3800 }
3801
3802 static int
3803 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
3804 {
3805         uint64_t ns;
3806         struct bnxt *bp = dev->data->dev_private;
3807         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3808
3809         if (!ptp)
3810                 return 0;
3811
3812         ns = rte_timespec_to_ns(ts);
3813         /* Set the timecounters to a new value. */
3814         ptp->tc.nsec = ns;
3815
3816         return 0;
3817 }
3818
3819 static int
3820 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
3821 {
3822         struct bnxt *bp = dev->data->dev_private;
3823         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3824         uint64_t ns, systime_cycles = 0;
3825         int rc = 0;
3826
3827         if (!ptp)
3828                 return 0;
3829
3830         if (BNXT_CHIP_THOR(bp))
3831                 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
3832                                              &systime_cycles);
3833         else
3834                 systime_cycles = bnxt_cc_read(bp);
3835
3836         ns = rte_timecounter_update(&ptp->tc, systime_cycles);
3837         *ts = rte_ns_to_timespec(ns);
3838
3839         return rc;
3840 }

3841 static int
3842 bnxt_timesync_enable(struct rte_eth_dev *dev)
3843 {
3844         struct bnxt *bp = dev->data->dev_private;
3845         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3846         uint32_t shift = 0;
3847         int rc;
3848
3849         if (!ptp)
3850                 return 0;
3851
3852         ptp->rx_filter = 1;
3853         ptp->tx_tstamp_en = 1;
3854         ptp->rxctl = BNXT_PTP_MSG_EVENTS;
3855
3856         rc = bnxt_hwrm_ptp_cfg(bp);
3857         if (rc)
3858                 return rc;
3859
3860         memset(&ptp->tc, 0, sizeof(struct rte_timecounter));
3861         memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
3862         memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
3863
3864         ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3865         ptp->tc.cc_shift = shift;
3866         ptp->tc.nsec_mask = (1ULL << shift) - 1;
3867
3868         ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3869         ptp->rx_tstamp_tc.cc_shift = shift;
3870         ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
3871
3872         ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
3873         ptp->tx_tstamp_tc.cc_shift = shift;
3874         ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
3875
3876         if (!BNXT_CHIP_THOR(bp))
3877                 bnxt_map_ptp_regs(bp);
3878
3879         return 0;
3880 }
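
/*
 * Editor's note (illustrative sketch, not upstream driver code): with
 * cc_shift = 0 the timecounters above treat one device cycle as one
 * nanosecond, so conversion reduces to extending the masked hardware counter
 * into a monotonic 64-bit value:
 */
static __rte_unused uint64_t
bnxt_example_cycles_to_ns(struct bnxt_ptp_cfg *ptp, uint64_t cycles)
{
        /* rte_timecounter_update() handles the mask/rollover bookkeeping */
        return rte_timecounter_update(&ptp->tc, cycles);
}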
3881
3882 static int
3883 bnxt_timesync_disable(struct rte_eth_dev *dev)
3884 {
3885         struct bnxt *bp = dev->data->dev_private;
3886         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3887
3888         if (!ptp)
3889                 return 0;
3890
3891         ptp->rx_filter = 0;
3892         ptp->tx_tstamp_en = 0;
3893         ptp->rxctl = 0;
3894
3895         bnxt_hwrm_ptp_cfg(bp);
3896
3897         if (!BNXT_CHIP_THOR(bp))
3898                 bnxt_unmap_ptp_regs(bp);
3899
3900         return 0;
3901 }
3902
3903 static int
3904 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
3905                                  struct timespec *timestamp,
3906                                  uint32_t flags __rte_unused)
3907 {
3908         struct bnxt *bp = dev->data->dev_private;
3909         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3910         uint64_t rx_tstamp_cycles = 0;
3911         uint64_t ns;
3912
3913         if (!ptp)
3914                 return 0;
3915
3916         if (BNXT_CHIP_THOR(bp))
3917                 rx_tstamp_cycles = ptp->rx_timestamp;
3918         else
3919                 bnxt_get_rx_ts(bp, &rx_tstamp_cycles);
3920
3921         ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles);
3922         *timestamp = rte_ns_to_timespec(ns);
3923         return 0;
3924 }
3925
3926 static int
3927 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
3928                                  struct timespec *timestamp)
3929 {
3930         struct bnxt *bp = dev->data->dev_private;
3931         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3932         uint64_t tx_tstamp_cycles = 0;
3933         uint64_t ns;
3934         int rc = 0;
3935
3936         if (!ptp)
3937                 return 0;
3938
3939         if (BNXT_CHIP_THOR(bp))
3940                 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX,
3941                                              &tx_tstamp_cycles);
3942         else
3943                 rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles);
3944
3945         ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles);
3946         *timestamp = rte_ns_to_timespec(ns);
3947
3948         return rc;
3949 }
3950
3951 static int
3952 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
3953 {
3954         struct bnxt *bp = dev->data->dev_private;
3955         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
3956
3957         if (!ptp)
3958                 return 0;
3959
3960         ptp->tc.nsec += delta;
3961
3962         return 0;
3963 }
3964
3965 static int
3966 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
3967 {
3968         struct bnxt *bp = dev->data->dev_private;
3969         int rc;
3970         uint32_t dir_entries;
3971         uint32_t entry_length;
3972
3973         rc = is_bnxt_in_error(bp);
3974         if (rc)
3975                 return rc;
3976
3977         PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n",
3978                     bp->pdev->addr.domain, bp->pdev->addr.bus,
3979                     bp->pdev->addr.devid, bp->pdev->addr.function);
3980
3981         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3982         if (rc != 0)
3983                 return rc;
3984
3985         return dir_entries * entry_length;
3986 }
3987
3988 static int
3989 bnxt_get_eeprom_op(struct rte_eth_dev *dev,
3990                 struct rte_dev_eeprom_info *in_eeprom)
3991 {
3992         struct bnxt *bp = dev->data->dev_private;
3993         uint32_t index;
3994         uint32_t offset;
3995         int rc;
3996
3997         rc = is_bnxt_in_error(bp);
3998         if (rc)
3999                 return rc;
4000
4001         PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n",
4002                     bp->pdev->addr.domain, bp->pdev->addr.bus,
4003                     bp->pdev->addr.devid, bp->pdev->addr.function,
4004                     in_eeprom->offset, in_eeprom->length);
4005
4006         if (in_eeprom->offset == 0) /* special offset value to get directory */
4007                 return bnxt_get_nvram_directory(bp, in_eeprom->length,
4008                                                 in_eeprom->data);
4009
4010         index = in_eeprom->offset >> 24;
4011         offset = in_eeprom->offset & 0xffffff;
4012
4013         if (index != 0)
4014                 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset,
4015                                            in_eeprom->length, in_eeprom->data);
4016
4017         return 0;
4018 }
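
/*
 * Editor's note (illustrative sketch, not upstream driver code): the
 * in_eeprom->offset convention decoded above. Offset 0 requests the NVM
 * directory itself; otherwise bits 31:24 carry (directory index + 1) and
 * bits 23:0 the byte offset within that item.
 */
static __rte_unused uint32_t
bnxt_example_eeprom_offset(uint8_t dir_index, uint32_t byte_off)
{
        return ((uint32_t)(dir_index + 1) << 24) | (byte_off & 0xffffff);
}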
4019
4020 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
4021 {
4022         switch (dir_type) {
4023         case BNX_DIR_TYPE_CHIMP_PATCH:
4024         case BNX_DIR_TYPE_BOOTCODE:
4025         case BNX_DIR_TYPE_BOOTCODE_2:
4026         case BNX_DIR_TYPE_APE_FW:
4027         case BNX_DIR_TYPE_APE_PATCH:
4028         case BNX_DIR_TYPE_KONG_FW:
4029         case BNX_DIR_TYPE_KONG_PATCH:
4030         case BNX_DIR_TYPE_BONO_FW:
4031         case BNX_DIR_TYPE_BONO_PATCH:
4032                 /* FALLTHROUGH */
4033                 return true;
4034         }
4035
4036         return false;
4037 }
4038
4039 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
4040 {
4041         switch (dir_type) {
4042         case BNX_DIR_TYPE_AVS:
4043         case BNX_DIR_TYPE_EXP_ROM_MBA:
4044         case BNX_DIR_TYPE_PCIE:
4045         case BNX_DIR_TYPE_TSCF_UCODE:
4046         case BNX_DIR_TYPE_EXT_PHY:
4047         case BNX_DIR_TYPE_CCM:
4048         case BNX_DIR_TYPE_ISCSI_BOOT:
4049         case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
4050         case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
4051                 /* FALLTHROUGH */
4052                 return true;
4053         }
4054
4055         return false;
4056 }
4057
4058 static bool bnxt_dir_type_is_executable(uint16_t dir_type)
4059 {
4060         return bnxt_dir_type_is_ape_bin_format(dir_type) ||
4061                 bnxt_dir_type_is_other_exec_format(dir_type);
4062 }
4063
4064 static int
4065 bnxt_set_eeprom_op(struct rte_eth_dev *dev,
4066                 struct rte_dev_eeprom_info *in_eeprom)
4067 {
4068         struct bnxt *bp = dev->data->dev_private;
4069         uint8_t index, dir_op;
4070         uint16_t type, ext, ordinal, attr;
4071         int rc;
4072
4073         rc = is_bnxt_in_error(bp);
4074         if (rc)
4075                 return rc;
4076
4077         PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n",
4078                     bp->pdev->addr.domain, bp->pdev->addr.bus,
4079                     bp->pdev->addr.devid, bp->pdev->addr.function,
4080                     in_eeprom->offset, in_eeprom->length);
4081
4082         if (!BNXT_PF(bp)) {
4083                 PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n");
4084                 return -EINVAL;
4085         }
4086
4087         type = in_eeprom->magic >> 16;
4088
4089         if (type == 0xffff) { /* special value for directory operations */
4090                 index = in_eeprom->magic & 0xff;
4091                 dir_op = in_eeprom->magic >> 8;
4092                 if (index == 0)
4093                         return -EINVAL;
4094                 switch (dir_op) {
4095                 case 0x0e: /* erase */
4096                         if (in_eeprom->offset != ~in_eeprom->magic)
4097                                 return -EINVAL;
4098                         return bnxt_hwrm_erase_nvram_directory(bp, index - 1);
4099                 default:
4100                         return -EINVAL;
4101                 }
4102         }
4103
4104         /* Create or re-write an NVM item: */
4105         if (bnxt_dir_type_is_executable(type))
4106                 return -EOPNOTSUPP;
4107         ext = in_eeprom->magic & 0xffff;
4108         ordinal = in_eeprom->offset >> 16;
4109         attr = in_eeprom->offset & 0xffff;
4110
4111         return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
4112                                      in_eeprom->data, in_eeprom->length);
4113 }
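
/*
 * Editor's note (illustrative sketch, not upstream driver code): the
 * directory-operation encoding checked above packs type 0xffff into the top
 * 16 bits of 'magic', the opcode (0x0e = erase) into bits 15:8 and the
 * 1-based directory index into bits 7:0; erase additionally requires
 * offset == ~magic as a confirmation latch.
 */
static __rte_unused void
bnxt_example_nvm_erase_args(uint8_t dir_index, uint32_t *magic,
                            uint32_t *offset)
{
        *magic = (0xffffu << 16) | (0x0eu << 8) | dir_index;
        *offset = ~*magic;
}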
4114
4115 /*
4116  * Initialization
4117  */
4118
4119 static const struct eth_dev_ops bnxt_dev_ops = {
4120         .dev_infos_get = bnxt_dev_info_get_op,
4121         .dev_close = bnxt_dev_close_op,
4122         .dev_configure = bnxt_dev_configure_op,
4123         .dev_start = bnxt_dev_start_op,
4124         .dev_stop = bnxt_dev_stop_op,
4125         .dev_set_link_up = bnxt_dev_set_link_up_op,
4126         .dev_set_link_down = bnxt_dev_set_link_down_op,
4127         .stats_get = bnxt_stats_get_op,
4128         .stats_reset = bnxt_stats_reset_op,
4129         .rx_queue_setup = bnxt_rx_queue_setup_op,
4130         .rx_queue_release = bnxt_rx_queue_release_op,
4131         .tx_queue_setup = bnxt_tx_queue_setup_op,
4132         .tx_queue_release = bnxt_tx_queue_release_op,
4133         .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op,
4134         .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op,
4135         .reta_update = bnxt_reta_update_op,
4136         .reta_query = bnxt_reta_query_op,
4137         .rss_hash_update = bnxt_rss_hash_update_op,
4138         .rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
4139         .link_update = bnxt_link_update_op,
4140         .promiscuous_enable = bnxt_promiscuous_enable_op,
4141         .promiscuous_disable = bnxt_promiscuous_disable_op,
4142         .allmulticast_enable = bnxt_allmulticast_enable_op,
4143         .allmulticast_disable = bnxt_allmulticast_disable_op,
4144         .mac_addr_add = bnxt_mac_addr_add_op,
4145         .mac_addr_remove = bnxt_mac_addr_remove_op,
4146         .flow_ctrl_get = bnxt_flow_ctrl_get_op,
4147         .flow_ctrl_set = bnxt_flow_ctrl_set_op,
4148         .udp_tunnel_port_add  = bnxt_udp_tunnel_port_add_op,
4149         .udp_tunnel_port_del  = bnxt_udp_tunnel_port_del_op,
4150         .vlan_filter_set = bnxt_vlan_filter_set_op,
4151         .vlan_offload_set = bnxt_vlan_offload_set_op,
4152         .vlan_tpid_set = bnxt_vlan_tpid_set_op,
4153         .vlan_pvid_set = bnxt_vlan_pvid_set_op,
4154         .mtu_set = bnxt_mtu_set_op,
4155         .mac_addr_set = bnxt_set_default_mac_addr_op,
4156         .xstats_get = bnxt_dev_xstats_get_op,
4157         .xstats_get_names = bnxt_dev_xstats_get_names_op,
4158         .xstats_reset = bnxt_dev_xstats_reset_op,
4159         .fw_version_get = bnxt_fw_version_get,
4160         .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
4161         .rxq_info_get = bnxt_rxq_info_get_op,
4162         .txq_info_get = bnxt_txq_info_get_op,
4163         .dev_led_on = bnxt_dev_led_on_op,
4164         .dev_led_off = bnxt_dev_led_off_op,
4165         .xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
4166         .xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
4167         .rx_queue_count = bnxt_rx_queue_count_op,
4168         .rx_descriptor_status = bnxt_rx_descriptor_status_op,
4169         .tx_descriptor_status = bnxt_tx_descriptor_status_op,
4170         .rx_queue_start = bnxt_rx_queue_start,
4171         .rx_queue_stop = bnxt_rx_queue_stop,
4172         .tx_queue_start = bnxt_tx_queue_start,
4173         .tx_queue_stop = bnxt_tx_queue_stop,
4174         .filter_ctrl = bnxt_filter_ctrl_op,
4175         .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
4176         .get_eeprom_length    = bnxt_get_eeprom_length_op,
4177         .get_eeprom           = bnxt_get_eeprom_op,
4178         .set_eeprom           = bnxt_set_eeprom_op,
4179         .timesync_enable      = bnxt_timesync_enable,
4180         .timesync_disable     = bnxt_timesync_disable,
4181         .timesync_read_time   = bnxt_timesync_read_time,
4182         .timesync_write_time   = bnxt_timesync_write_time,
4183         .timesync_adjust_time = bnxt_timesync_adjust_time,
4184         .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp,
4185         .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp,
4186 };
4187
4188 static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg)
4189 {
4190         uint32_t offset;
4191
4192         /* Only pre-map the reset GRC registers using window 3 */
4193         rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 +
4194                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8);
4195
4196         offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc);
4197
4198         return offset;
4199 }
4200
4201 int bnxt_map_fw_health_status_regs(struct bnxt *bp)
4202 {
4203         struct bnxt_error_recovery_info *info = bp->recovery_info;
4204         uint32_t reg_base = 0xffffffff;
4205         int i;
4206
4207         /* Only pre-map the monitoring GRC registers using window 2 */
4208         for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) {
4209                 uint32_t reg = info->status_regs[i];
4210
4211                 if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC)
4212                         continue;
4213
4214                 if (reg_base == 0xffffffff)
4215                         reg_base = reg & 0xfffff000;
4216                 if ((reg & 0xfffff000) != reg_base)
4217                         return -ERANGE;
4218
4219                 /* Mask with 0xffc: the lower 2 bits indicate the
4220                  * address space location.
4221                  */
4222                 info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE +
4223                                                 (reg & 0xffc);
4224         }
4225
4226         if (reg_base == 0xffffffff)
4227                 return 0;
4228
4229         rte_write32(reg_base, (uint8_t *)bp->bar0 +
4230                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
4231
4232         return 0;
4233 }
4234
4235 static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index)
4236 {
4237         struct bnxt_error_recovery_info *info = bp->recovery_info;
4238         uint32_t delay = info->delay_after_reset[index];
4239         uint32_t val = info->reset_reg_val[index];
4240         uint32_t reg = info->reset_reg[index];
4241         uint32_t type, offset;
4242
4243         type = BNXT_FW_STATUS_REG_TYPE(reg);
4244         offset = BNXT_FW_STATUS_REG_OFF(reg);
4245
4246         switch (type) {
4247         case BNXT_FW_STATUS_REG_TYPE_CFG:
4248                 rte_pci_write_config(bp->pdev, &val, sizeof(val), offset);
4249                 break;
4250         case BNXT_FW_STATUS_REG_TYPE_GRC:
4251                 offset = bnxt_map_reset_regs(bp, offset);
4252                 rte_write32(val, (uint8_t *)bp->bar0 + offset);
4253                 break;
4254         case BNXT_FW_STATUS_REG_TYPE_BAR0:
4255                 rte_write32(val, (uint8_t *)bp->bar0 + offset);
4256                 break;
4257         }
4258         /* Wait for the FW-specified interval until the core reset completes */
4259         if (delay)
4260                 rte_delay_ms(delay);
4261 }
4262
4263 static void bnxt_dev_cleanup(struct bnxt *bp)
4264 {
4265         bnxt_set_hwrm_link_config(bp, false);
4266         bp->link_info->link_up = 0;
4267         if (bp->eth_dev->data->dev_started)
4268                 bnxt_dev_stop_op(bp->eth_dev);
4269
4270         bnxt_uninit_resources(bp, true);
4271 }
4272
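/*
 * vlan_filter_conf is a 4096-bit bitmap stored as 64 uint64_t words.
 * Worked example: VLAN ID 100 lives at word 100 / 64 = 1, bit
 * 100 % 64 = 36, so it is set iff (ids[1] & (UINT64_C(1) << 36)) != 0.
 */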
4273 static int bnxt_restore_vlan_filters(struct bnxt *bp)
4274 {
4275         struct rte_eth_dev *dev = bp->eth_dev;
4276         struct rte_vlan_filter_conf *vfc;
4277         int vidx, vbit, rc;
4278         uint16_t vlan_id;
4279
4280         for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) {
4281                 vfc = &dev->data->vlan_filter_conf;
4282                 vidx = vlan_id / 64;
4283                 vbit = vlan_id % 64;
4284
4285                 /* Each bit corresponds to a VLAN id */
4286                 if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) {
4287                         rc = bnxt_add_vlan_filter(bp, vlan_id);
4288                         if (rc)
4289                                 return rc;
4290                 }
4291         }
4292
4293         return 0;
4294 }
4295
4296 static int bnxt_restore_mac_filters(struct bnxt *bp)
4297 {
4298         struct rte_eth_dev *dev = bp->eth_dev;
4299         struct rte_eth_dev_info dev_info;
4300         struct rte_ether_addr *addr;
4301         uint64_t pool_mask;
4302         uint32_t pool = 0;
4303         uint16_t i;
4304         int rc;
4305
4306         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
4307                 return 0;
4308
4309         rc = bnxt_dev_info_get_op(dev, &dev_info);
4310         if (rc)
4311                 return rc;
4312
4313         /* replay MAC address configuration */
4314         for (i = 1; i < dev_info.max_mac_addrs; i++) {
4315                 addr = &dev->data->mac_addrs[i];
4316
4317                 /* skip zero address */
4318                 if (rte_is_zero_ether_addr(addr))
4319                         continue;
4320
4321                 pool = 0;
4322                 pool_mask = dev->data->mac_pool_sel[i];
4323
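                /*
                 * mac_pool_sel[i] is a pool bitmask for MAC address i: bit k
                 * set means the address belongs to pool k. E.g. a mask of
                 * 0x5 (hypothetical) re-adds the address to pools 0 and 2.
                 */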
4324                 do {
4325                         if (pool_mask & 1ULL) {
4326                                 rc = bnxt_mac_addr_add_op(dev, addr, i, pool);
4327                                 if (rc)
4328                                         return rc;
4329                         }
4330                         pool_mask >>= 1;
4331                         pool++;
4332                 } while (pool_mask);
4333         }
4334
4335         return 0;
4336 }
4337
4338 static int bnxt_restore_filters(struct bnxt *bp)
4339 {
4340         struct rte_eth_dev *dev = bp->eth_dev;
4341         int ret = 0;
4342
4343         if (dev->data->all_multicast) {
4344                 ret = bnxt_allmulticast_enable_op(dev);
4345                 if (ret)
4346                         return ret;
4347         }
4348         if (dev->data->promiscuous) {
4349                 ret = bnxt_promiscuous_enable_op(dev);
4350                 if (ret)
4351                         return ret;
4352         }
4353
4354         ret = bnxt_restore_mac_filters(bp);
4355         if (ret)
4356                 return ret;
4357
4358         ret = bnxt_restore_vlan_filters(bp);
4359         /* TODO restore other filters as well */
4360         return ret;
4361 }
4362
4363 static void bnxt_dev_recover(void *arg)
4364 {
4365         struct bnxt *bp = arg;
4366         int timeout = bp->fw_reset_max_msecs;
4367         int rc = 0;
4368
4369         /* Clear the error flag so that device re-init can proceed */
4370         bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
4371
4372         do {
4373                 rc = bnxt_hwrm_ver_get(bp, SHORT_HWRM_CMD_TIMEOUT);
4374                 if (rc == 0)
4375                         break;
4376                 rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL);
4377                 timeout -= BNXT_FW_READY_WAIT_INTERVAL;
4378         } while (rc && timeout);
4379
4380         if (rc) {
4381                 PMD_DRV_LOG(ERR, "FW is not ready after reset\n");
4382                 goto err;
4383         }
4384
4385         rc = bnxt_init_resources(bp, true);
4386         if (rc) {
4387                 PMD_DRV_LOG(ERR,
4388                             "Failed to initialize resources after reset\n");
4389                 goto err;
4390         }
4391         /* clear reset flag as the device is initialized now */
4392         bp->flags &= ~BNXT_FLAG_FW_RESET;
4393
4394         rc = bnxt_dev_start_op(bp->eth_dev);
4395         if (rc) {
4396                 PMD_DRV_LOG(ERR, "Failed to start port after reset\n");
4397                 goto err_start;
4398         }
4399
4400         rc = bnxt_restore_filters(bp);
4401         if (rc)
4402                 goto err_start;
4403
4404         PMD_DRV_LOG(INFO, "Recovered from FW reset\n");
4405         return;
4406 err_start:
4407         bnxt_dev_stop_op(bp->eth_dev);
4408 err:
4409         bp->flags |= BNXT_FLAG_FATAL_ERROR;
4410         bnxt_uninit_resources(bp, false);
4411         PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n");
4412 }
4413
4414 void bnxt_dev_reset_and_resume(void *arg)
4415 {
4416         struct bnxt *bp = arg;
4417         int rc;
4418
4419         bnxt_dev_cleanup(bp);
4420
4421         bnxt_wait_for_device_shutdown(bp);
4422
4423         rc = rte_eal_alarm_set(US_PER_MS * bp->fw_reset_min_msecs,
4424                                bnxt_dev_recover, (void *)bp);
4425         if (rc)
4426                 PMD_DRV_LOG(ERR, "Error setting recovery alarm\n");
4427 }
4428
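/*
 * Status register descriptors encode their address space in the low two
 * bits (PCI config space, GRC, or BAR0), as noted in
 * bnxt_map_fw_health_status_regs(): BNXT_FW_STATUS_REG_TYPE() extracts
 * that tag and BNXT_FW_STATUS_REG_OFF() masks it off to recover the
 * raw offset.
 */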
4429 uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index)
4430 {
4431         struct bnxt_error_recovery_info *info = bp->recovery_info;
4432         uint32_t reg = info->status_regs[index];
4433         uint32_t type, offset, val = 0;
4434
4435         type = BNXT_FW_STATUS_REG_TYPE(reg);
4436         offset = BNXT_FW_STATUS_REG_OFF(reg);
4437
4438         switch (type) {
4439         case BNXT_FW_STATUS_REG_TYPE_CFG:
4440                 rte_pci_read_config(bp->pdev, &val, sizeof(val), offset);
4441                 break;
4442         case BNXT_FW_STATUS_REG_TYPE_GRC:
4443                 offset = info->mapped_status_regs[index];
4444                 /* FALLTHROUGH */
4445         case BNXT_FW_STATUS_REG_TYPE_BAR0:
4446                 val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
4447                                        offset));
4448                 break;
4449         }
4450
4451         return val;
4452 }
4453
4454 static int bnxt_fw_reset_all(struct bnxt *bp)
4455 {
4456         struct bnxt_error_recovery_info *info = bp->recovery_info;
4457         uint32_t i;
4458         int rc = 0;
4459
4460         if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) {
4461                 /* Reset through master function driver */
4462                 for (i = 0; i < info->reg_array_cnt; i++)
4463                         bnxt_write_fw_reset_reg(bp, i);
4464                 /* Wait for time specified by FW after triggering reset */
4465                 rte_delay_ms(info->master_func_wait_period_after_reset);
4466         } else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) {
4467                 /* Reset with the help of Kong processor */
4468                 rc = bnxt_hwrm_fw_reset(bp);
4469                 if (rc)
4470                         PMD_DRV_LOG(ERR, "Failed to reset FW\n");
4471         }
4472
4473         return rc;
4474 }
4475
4476 static void bnxt_fw_reset_cb(void *arg)
4477 {
4478         struct bnxt *bp = arg;
4479         struct bnxt_error_recovery_info *info = bp->recovery_info;
4480         int rc = 0;
4481
4482         /* Only the master function can initiate an FW reset */
4483         if (bnxt_is_master_func(bp) &&
4484             bnxt_is_recovery_enabled(bp)) {
4485                 rc = bnxt_fw_reset_all(bp);
4486                 if (rc) {
4487                         PMD_DRV_LOG(ERR, "Adapter recovery failed\n");
4488                         return;
4489                 }
4490         }
4491
4492         /* If the recovery method is ERROR_RECOVERY_CO_CPU, Kong sends an
4493          * EXCEPTION_FATAL_ASYNC event to all the functions (including the
4494          * master function). On receiving this async event, all active
4495          * drivers should treat the case as FW-initiated recovery.
4496          */
4497         if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) {
4498                 bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT;
4499                 bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT;
4500
4501                 /* Schedule recovery from the error */
4502                 rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume,
4503                                   (void *)bp);
4504         }
4505 }
4506
4507 /* The driver should poll the FW heartbeat and reset_counter registers at
4508  * the frequency advertised by the FW in HWRM_ERROR_RECOVERY_QCFG.
4509  * When the driver detects that the heartbeat has stopped or that
4510  * reset_counter has changed, it must trigger a reset to recover from the
4511  * error condition. A "master PF" is the function with the privilege to
4512  * initiate the chimp reset; it is elected by the firmware and notified
4513  * through an async message.
4514  */
4515 static void bnxt_check_fw_health(void *arg)
4516 {
4517         struct bnxt *bp = arg;
4518         struct bnxt_error_recovery_info *info = bp->recovery_info;
4519         uint32_t val = 0, wait_msec;
4520
4521         if (!info || !bnxt_is_recovery_enabled(bp) ||
4522             is_bnxt_in_error(bp))
4523                 return;
4524
4525         val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG);
4526         if (val == info->last_heart_beat)
4527                 goto reset;
4528
4529         info->last_heart_beat = val;
4530
4531         val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG);
4532         if (val != info->last_reset_counter)
4533                 goto reset;
4534
4535         info->last_reset_counter = val;
4536
4537         rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq,
4538                           bnxt_check_fw_health, (void *)bp);
4539
4540         return;
4541 reset:
4542         /* Stop DMA to/from device */
4543         bp->flags |= BNXT_FLAG_FATAL_ERROR;
4544         bp->flags |= BNXT_FLAG_FW_RESET;
4545
4546         PMD_DRV_LOG(ERR, "Detected FW dead condition\n");
4547
4548         if (bnxt_is_master_func(bp))
4549                 wait_msec = info->master_func_wait_period;
4550         else
4551                 wait_msec = info->normal_func_wait_period;
4552
4553         rte_eal_alarm_set(US_PER_MS * wait_msec,
4554                           bnxt_fw_reset_cb, (void *)bp);
4555 }
4556
4557 void bnxt_schedule_fw_health_check(struct bnxt *bp)
4558 {
4559         uint32_t polling_freq;
4560
4561         if (!bnxt_is_recovery_enabled(bp))
4562                 return;
4563
4564         if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED)
4565                 return;
4566
4567         polling_freq = bp->recovery_info->driver_polling_freq;
4568
4569         rte_eal_alarm_set(US_PER_MS * polling_freq,
4570                           bnxt_check_fw_health, (void *)bp);
4571         bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
4572 }
4573
4574 static void bnxt_cancel_fw_health_check(struct bnxt *bp)
4575 {
4576         if (!bnxt_is_recovery_enabled(bp))
4577                 return;
4578
4579         rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp);
4580         bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
4581 }
4582
4583 static bool bnxt_vf_pciid(uint16_t device_id)
4584 {
4585         switch (device_id) {
4586         case BROADCOM_DEV_ID_57304_VF:
4587         case BROADCOM_DEV_ID_57406_VF:
4588         case BROADCOM_DEV_ID_5731X_VF:
4589         case BROADCOM_DEV_ID_5741X_VF:
4590         case BROADCOM_DEV_ID_57414_VF:
4591         case BROADCOM_DEV_ID_STRATUS_NIC_VF1:
4592         case BROADCOM_DEV_ID_STRATUS_NIC_VF2:
4593         case BROADCOM_DEV_ID_58802_VF:
4594         case BROADCOM_DEV_ID_57500_VF1:
4595         case BROADCOM_DEV_ID_57500_VF2:
4596                 /* FALLTHROUGH */
4597                 return true;
4598         default:
4599                 return false;
4600         }
4601 }
4602
4603 static bool bnxt_thor_device(uint16_t device_id)
4604 {
4605         switch (device_id) {
4606         case BROADCOM_DEV_ID_57508:
4607         case BROADCOM_DEV_ID_57504:
4608         case BROADCOM_DEV_ID_57502:
4609         case BROADCOM_DEV_ID_57508_MF1:
4610         case BROADCOM_DEV_ID_57504_MF1:
4611         case BROADCOM_DEV_ID_57502_MF1:
4612         case BROADCOM_DEV_ID_57508_MF2:
4613         case BROADCOM_DEV_ID_57504_MF2:
4614         case BROADCOM_DEV_ID_57502_MF2:
4615         case BROADCOM_DEV_ID_57500_VF1:
4616         case BROADCOM_DEV_ID_57500_VF2:
4617                 /* FALLTHROUGH */
4618                 return true;
4619         default:
4620                 return false;
4621         }
4622 }
4623
4624 bool bnxt_stratus_device(struct bnxt *bp)
4625 {
4626         uint16_t device_id = bp->pdev->id.device_id;
4627
4628         switch (device_id) {
4629         case BROADCOM_DEV_ID_STRATUS_NIC:
4630         case BROADCOM_DEV_ID_STRATUS_NIC_VF1:
4631         case BROADCOM_DEV_ID_STRATUS_NIC_VF2:
4632                 /* FALLTHROUGH */
4633                 return true;
4634         default:
4635                 return false;
4636         }
4637 }
4638
4639 static int bnxt_init_board(struct rte_eth_dev *eth_dev)
4640 {
4641         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
4642         struct bnxt *bp = eth_dev->data->dev_private;
4643
4644         /* Map BAR0 (device registers) and BAR2 (doorbells) */
4645         bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
4646         bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
4647         if (!bp->bar0 || !bp->doorbell_base) {
4648                 PMD_DRV_LOG(ERR, "Unable to access Hardware\n");
4649                 return -ENODEV;
4650         }
4651
4652         bp->eth_dev = eth_dev;
4653         bp->pdev = pci_dev;
4654
4655         return 0;
4656 }
4657
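/*
 * Backing-store allocation sketch: when mem_size spans more than one
 * BNXT_PAGE_SIZE page, a separate page-directory memzone ("pg_tbl", 8
 * bytes per data page) is reserved, and each little-endian entry holds a
 * data page's DMA address OR'd with PTU_PTE_VALID (plus NEXT_TO_LAST/LAST
 * markers for ring-type memory). Single-page blocks skip the directory.
 */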
4658 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
4659                                   struct bnxt_ctx_pg_info *ctx_pg,
4660                                   uint32_t mem_size,
4661                                   const char *suffix,
4662                                   uint16_t idx)
4663 {
4664         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
4665         const struct rte_memzone *mz = NULL;
4666         char mz_name[RTE_MEMZONE_NAMESIZE];
4667         rte_iova_t mz_phys_addr;
4668         uint64_t valid_bits = 0;
4669         uint32_t sz;
4670         int i;
4671
4672         if (!mem_size)
4673                 return 0;
4674
4675         rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) /
4676                          BNXT_PAGE_SIZE;
4677         rmem->page_size = BNXT_PAGE_SIZE;
4678         rmem->pg_arr = ctx_pg->ctx_pg_arr;
4679         rmem->dma_arr = ctx_pg->ctx_dma_arr;
4680         rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
4681
4682         valid_bits = PTU_PTE_VALID;
4683
4684         if (rmem->nr_pages > 1) {
4685                 snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4686                          "bnxt_ctx_pg_tbl%s_%x_%d",
4687                          suffix, idx, bp->eth_dev->data->port_id);
4688                 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4689                 mz = rte_memzone_lookup(mz_name);
4690                 if (!mz) {
4691                         mz = rte_memzone_reserve_aligned(mz_name,
4692                                                 rmem->nr_pages * 8,
4693                                                 SOCKET_ID_ANY,
4694                                                 RTE_MEMZONE_2MB |
4695                                                 RTE_MEMZONE_SIZE_HINT_ONLY |
4696                                                 RTE_MEMZONE_IOVA_CONTIG,
4697                                                 BNXT_PAGE_SIZE);
4698                         if (mz == NULL)
4699                                 return -ENOMEM;
4700                 }
4701
4702                 memset(mz->addr, 0, mz->len);
4703                 mz_phys_addr = mz->iova;
4704
4705                 rmem->pg_tbl = mz->addr;
4706                 rmem->pg_tbl_map = mz_phys_addr;
4707                 rmem->pg_tbl_mz = mz;
4708         }
4709
4710         snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d",
4711                  suffix, idx, bp->eth_dev->data->port_id);
4712         mz = rte_memzone_lookup(mz_name);
4713         if (!mz) {
4714                 mz = rte_memzone_reserve_aligned(mz_name,
4715                                                  mem_size,
4716                                                  SOCKET_ID_ANY,
4717                                                  RTE_MEMZONE_1GB |
4718                                                  RTE_MEMZONE_SIZE_HINT_ONLY |
4719                                                  RTE_MEMZONE_IOVA_CONTIG,
4720                                                  BNXT_PAGE_SIZE);
4721                 if (mz == NULL)
4722                         return -ENOMEM;
4723         }
4724
4725         memset(mz->addr, 0, mz->len);
4726         mz_phys_addr = mz->iova;
4727
4728         for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) {
4729                 rmem->pg_arr[i] = ((char *)mz->addr) + sz;
4730                 rmem->dma_arr[i] = mz_phys_addr + sz;
4731
4732                 if (rmem->nr_pages > 1) {
4733                         if (i == rmem->nr_pages - 2 &&
4734                             (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
4735                                 valid_bits |= PTU_PTE_NEXT_TO_LAST;
4736                         else if (i == rmem->nr_pages - 1 &&
4737                                  (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
4738                                 valid_bits |= PTU_PTE_LAST;
4739
4740                         rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] |
4741                                                            valid_bits);
4742                 }
4743         }
4744
4745         rmem->mz = mz;
4746         if (rmem->vmem_size)
4747                 rmem->vmem = (void **)mz->addr;
4748         rmem->dma_arr[0] = mz_phys_addr;
4749         return 0;
4750 }
4751
4752 static void bnxt_free_ctx_mem(struct bnxt *bp)
4753 {
4754         int i;
4755
4756         if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED))
4757                 return;
4758
4759         bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED;
4760         rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz);
4761         rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz);
4762         rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz);
4763         rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz);
4764         rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz);
4765         rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz);
4766         rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz);
4767         rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz);
4768         rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz);
4769         rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz);
4770
4771         for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) {
4772                 if (bp->ctx->tqm_mem[i])
4773                         rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz);
4774         }
4775
4776         rte_free(bp->ctx);
4777         bp->ctx = NULL;
4778 }
4779
4780 #define bnxt_roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))
4781
4782 #define min_t(type, x, y) ({                    \
4783         type __min1 = (x);                      \
4784         type __min2 = (y);                      \
4785         __min1 < __min2 ? __min1 : __min2; })
4786
4787 #define max_t(type, x, y) ({                    \
4788         type __max1 = (x);                      \
4789         type __max2 = (y);                      \
4790         __max1 > __max2 ? __max1 : __max2; })
4791
4792 #define clamp_t(type, _x, min, max)     min_t(type, max_t(type, _x, min), max)
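/*
 * Usage sketch with hypothetical values:
 *   bnxt_roundup(100, 32)              -> 128
 *   clamp_t(uint32_t, 10000, 64, 4096) -> 4096 (clipped to max)
 *   clamp_t(uint32_t, 16, 64, 4096)    -> 64   (raised to min)
 */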
4793
4794 int bnxt_alloc_ctx_mem(struct bnxt *bp)
4795 {
4796         struct bnxt_ctx_pg_info *ctx_pg;
4797         struct bnxt_ctx_mem_info *ctx;
4798         uint32_t mem_size, ena, entries;
4799         uint32_t entries_sp, min;
4800         int i, rc;
4801
4802         rc = bnxt_hwrm_func_backing_store_qcaps(bp);
4803         if (rc) {
4804                 PMD_DRV_LOG(ERR, "Query context mem capability failed\n");
4805                 return rc;
4806         }
4807         ctx = bp->ctx;
4808         if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
4809                 return 0;
4810
4811         ctx_pg = &ctx->qp_mem;
4812         ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
4813         mem_size = ctx->qp_entry_size * ctx_pg->entries;
4814         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0);
4815         if (rc)
4816                 return rc;
4817
4818         ctx_pg = &ctx->srq_mem;
4819         ctx_pg->entries = ctx->srq_max_l2_entries;
4820         mem_size = ctx->srq_entry_size * ctx_pg->entries;
4821         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0);
4822         if (rc)
4823                 return rc;
4824
4825         ctx_pg = &ctx->cq_mem;
4826         ctx_pg->entries = ctx->cq_max_l2_entries;
4827         mem_size = ctx->cq_entry_size * ctx_pg->entries;
4828         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0);
4829         if (rc)
4830                 return rc;
4831
4832         ctx_pg = &ctx->vnic_mem;
4833         ctx_pg->entries = ctx->vnic_max_vnic_entries +
4834                 ctx->vnic_max_ring_table_entries;
4835         mem_size = ctx->vnic_entry_size * ctx_pg->entries;
4836         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0);
4837         if (rc)
4838                 return rc;
4839
4840         ctx_pg = &ctx->stat_mem;
4841         ctx_pg->entries = ctx->stat_max_entries;
4842         mem_size = ctx->stat_entry_size * ctx_pg->entries;
4843         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0);
4844         if (rc)
4845                 return rc;
4846
4847         min = ctx->tqm_min_entries_per_ring;
4848
4849         entries_sp = ctx->qp_max_l2_entries +
4850                      ctx->vnic_max_vnic_entries +
4851                      2 * ctx->qp_min_qp1_entries + min;
4852         entries_sp = bnxt_roundup(entries_sp, ctx->tqm_entries_multiple);
4853
4854         entries = ctx->qp_max_l2_entries + ctx->qp_min_qp1_entries;
4855         entries = bnxt_roundup(entries, ctx->tqm_entries_multiple);
4856         entries = clamp_t(uint32_t, entries, min,
4857                           ctx->tqm_max_entries_per_ring);
4858         for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
4859                 ctx_pg = ctx->tqm_mem[i];
4860                 ctx_pg->entries = i ? entries : entries_sp;
4861                 mem_size = ctx->tqm_entry_size * ctx_pg->entries;
4862                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i);
4863                 if (rc)
4864                         return rc;
4865                 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
4866         }
4867
4868         ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
4869         rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
4870         if (rc)
4871                 PMD_DRV_LOG(ERR,
4872                             "Failed to configure context mem: rc = %d\n", rc);
4873         else
4874                 ctx->flags |= BNXT_CTX_FLAG_INITED;
4875
4876         return rc;
4877 }
4878
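/*
 * Port-stats memzones are named after the PCI address so a restarted
 * process can find and reuse them via rte_memzone_lookup(); e.g.
 * (hypothetical address) "bnxt_0000:03:00.0-rx_port_stats".
 */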
4879 static int bnxt_alloc_stats_mem(struct bnxt *bp)
4880 {
4881         struct rte_pci_device *pci_dev = bp->pdev;
4882         char mz_name[RTE_MEMZONE_NAMESIZE];
4883         const struct rte_memzone *mz = NULL;
4884         uint32_t total_alloc_len;
4885         rte_iova_t mz_phys_addr;
4886
4887         if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2)
4888                 return 0;
4889
4890         snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4891                  "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
4892                  pci_dev->addr.bus, pci_dev->addr.devid,
4893                  pci_dev->addr.function, "rx_port_stats");
4894         mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4895         mz = rte_memzone_lookup(mz_name);
4896         total_alloc_len =
4897                 RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) +
4898                                        sizeof(struct rx_port_stats_ext) + 512);
4899         if (!mz) {
4900                 mz = rte_memzone_reserve(mz_name, total_alloc_len,
4901                                          SOCKET_ID_ANY,
4902                                          RTE_MEMZONE_2MB |
4903                                          RTE_MEMZONE_SIZE_HINT_ONLY |
4904                                          RTE_MEMZONE_IOVA_CONTIG);
4905                 if (mz == NULL)
4906                         return -ENOMEM;
4907         }
4908         memset(mz->addr, 0, mz->len);
4909         mz_phys_addr = mz->iova;
4910
4911         bp->rx_mem_zone = (const void *)mz;
4912         bp->hw_rx_port_stats = mz->addr;
4913         bp->hw_rx_port_stats_map = mz_phys_addr;
4914
4915         snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
4916                  "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain,
4917                  pci_dev->addr.bus, pci_dev->addr.devid,
4918                  pci_dev->addr.function, "tx_port_stats");
4919         mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
4920         mz = rte_memzone_lookup(mz_name);
4921         total_alloc_len =
4922                 RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) +
4923                                        sizeof(struct tx_port_stats_ext) + 512);
4924         if (!mz) {
4925                 mz = rte_memzone_reserve(mz_name,
4926                                          total_alloc_len,
4927                                          SOCKET_ID_ANY,
4928                                          RTE_MEMZONE_2MB |
4929                                          RTE_MEMZONE_SIZE_HINT_ONLY |
4930                                          RTE_MEMZONE_IOVA_CONTIG);
4931                 if (mz == NULL)
4932                         return -ENOMEM;
4933         }
4934         memset(mz->addr, 0, mz->len);
4935         mz_phys_addr = mz->iova;
4936
4937         bp->tx_mem_zone = (const void *)mz;
4938         bp->hw_tx_port_stats = mz->addr;
4939         bp->hw_tx_port_stats_map = mz_phys_addr;
4940         bp->flags |= BNXT_FLAG_PORT_STATS;
4941
4942         /* Display extended statistics if the FW supports them */
4943         if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 ||
4944             bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 ||
4945             !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED))
4946                 return 0;
4947
4948         bp->hw_rx_port_stats_ext = (void *)
4949                 ((uint8_t *)bp->hw_rx_port_stats +
4950                  sizeof(struct rx_port_stats));
4951         bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
4952                 sizeof(struct rx_port_stats);
4953         bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;
4954
4955         if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 ||
4956             bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) {
4957                 bp->hw_tx_port_stats_ext = (void *)
4958                         ((uint8_t *)bp->hw_tx_port_stats +
4959                          sizeof(struct tx_port_stats));
4960                 bp->hw_tx_port_stats_ext_map =
4961                         bp->hw_tx_port_stats_map +
4962                         sizeof(struct tx_port_stats);
4963                 bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
4964         }
4965
4966         return 0;
4967 }
4968
4969 static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
4970 {
4971         struct bnxt *bp = eth_dev->data->dev_private;
4972         int rc = 0;
4973
4974         eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
4975                                                RTE_ETHER_ADDR_LEN *
4976                                                bp->max_l2_ctx,
4977                                                0);
4978         if (eth_dev->data->mac_addrs == NULL) {
4979                 PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
4980                 return -ENOMEM;
4981         }
4982
4983         if (!BNXT_HAS_DFLT_MAC_SET(bp)) {
4984                 if (BNXT_PF(bp))
4985                         return -EINVAL;
4986
4987                 /* Generate a random MAC address, if none was assigned by PF */
4988                 PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
4989                 bnxt_eth_hw_addr_random(bp->mac_addr);
4990                 PMD_DRV_LOG(INFO,
4991                             "Assign random MAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
4992                             bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
4993                             bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);
4994
4995                 rc = bnxt_hwrm_set_mac(bp);
4996                 if (rc)
4997                         return rc;
4998         }
4999
5000         /* Copy the permanent MAC from the FUNC_QCAPS response */
5001         memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);
5002
5003         return rc;
5004 }
5005
5006 static int bnxt_restore_dflt_mac(struct bnxt *bp)
5007 {
5008         int rc = 0;
5009
5010         /* MAC is already configured in FW */
5011         if (BNXT_HAS_DFLT_MAC_SET(bp))
5012                 return 0;
5013
5014         /* Restore the old MAC configured */
5015         rc = bnxt_hwrm_set_mac(bp);
5016         if (rc)
5017                 PMD_DRV_LOG(ERR, "Failed to restore MAC address\n");
5018
5019         return rc;
5020 }
5021
5022 static void bnxt_config_vf_req_fwd(struct bnxt *bp)
5023 {
5024         if (!BNXT_PF(bp))
5025                 return;
5026
5027 #define ALLOW_FUNC(x)   \
5028         { \
5029                 uint32_t arg = (x); \
5030                 bp->pf->vf_req_fwd[((arg) >> 5)] &= \
5031                 ~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
5032         }
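        /*
         * ALLOW_FUNC clears the command's bit in the vf_req_fwd bitmap.
         * The bit arithmetic places command ID x at word x >> 5, bit
         * x & 0x1f; e.g. (hypothetical ID) a command of 0x29 clears
         * bit 0x29 & 0x1f = 9 in word 0x29 >> 5 = 1.
         */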
5033
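        /*
         * bp->fw_ver packs the HWRM version one byte per field
         * (major << 24 | minor << 16 | update << 8), so the check below
         * reads as: [20.6.100, 20.7.0) or >= 20.8.0. For example,
         * (20 << 24) | (6 << 16) | (100 << 8) == 0x14066400.
         */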
5034         /* Forward all requests if firmware is new enough */
5035         if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
5036              (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
5037             ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
5038                 memset(bp->pf->vf_req_fwd, 0xff, sizeof(bp->pf->vf_req_fwd));
5039         } else {
5040                 PMD_DRV_LOG(WARNING,
5041                             "Firmware too old for VF mailbox functionality\n");
5042                 memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));
5043         }
5044
5045         /*
5046          * The following are used for driver cleanup. If we disallow these,
5047          * VF drivers can't shut down cleanly.
5048          */
5049         ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
5050         ALLOW_FUNC(HWRM_VNIC_FREE);
5051         ALLOW_FUNC(HWRM_RING_FREE);
5052         ALLOW_FUNC(HWRM_RING_GRP_FREE);
5053         ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
5054         ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
5055         ALLOW_FUNC(HWRM_STAT_CTX_FREE);
5056         ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
5057         ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
5058 }
5059
5060 uint16_t
5061 bnxt_get_svif(uint16_t port_id, bool func_svif)
5062 {
5063         struct rte_eth_dev *eth_dev;
5064         struct bnxt *bp;
5065
5066         eth_dev = &rte_eth_devices[port_id];
5067         bp = eth_dev->data->dev_private;
5068
5069         return func_svif ? bp->func_svif : bp->port_svif;
5070 }
5071
5072 uint16_t
5073 bnxt_get_vnic_id(uint16_t port)
5074 {
5075         struct rte_eth_dev *eth_dev;
5076         struct bnxt_vnic_info *vnic;
5077         struct bnxt *bp;
5078
5079         eth_dev = &rte_eth_devices[port];
5080         bp = eth_dev->data->dev_private;
5081
5082         vnic = BNXT_GET_DEFAULT_VNIC(bp);
5083
5084         return vnic->fw_vnic_id;
5085 }
5086
5087 uint16_t
5088 bnxt_get_fw_func_id(uint16_t port)
5089 {
5090         struct rte_eth_dev *eth_dev;
5091         struct bnxt *bp;
5092
5093         eth_dev = &rte_eth_devices[port];
5094         bp = eth_dev->data->dev_private;
5095
5096         return bp->fw_fid;
5097 }
5098
5099 static void bnxt_alloc_error_recovery_info(struct bnxt *bp)
5100 {
5101         struct bnxt_error_recovery_info *info = bp->recovery_info;
5102
5103         if (info) {
5104                 if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))
5105                         memset(info, 0, sizeof(*info));
5106                 return;
5107         }
5108
5109         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5110                 return;
5111
5112         info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
5113                            sizeof(*info), 0);
5114         if (!info)
5115                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
5116
5117         bp->recovery_info = info;
5118 }
5119
5120 static void bnxt_check_fw_status(struct bnxt *bp)
5121 {
5122         uint32_t fw_status;
5123
5124         if (!(bp->recovery_info &&
5125               (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS)))
5126                 return;
5127
5128         fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG);
5129         if (fw_status != BNXT_FW_STATUS_HEALTHY)
5130                 PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n",
5131                             fw_status);
5132 }
5133
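/*
 * HCOMM discovery sketch: point GRC window 2 at HCOMM_STATUS_STRUCT_LOC,
 * read sig_ver through the window, and bail out quietly if the signature
 * is missing (older FW). Otherwise read fw_status_loc and, for GRC-type
 * locations, re-aim window 2 at that register's 4KB base so later health
 * reads need no further window programming.
 */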
5134 static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp)
5135 {
5136         struct bnxt_error_recovery_info *info = bp->recovery_info;
5137         uint32_t status_loc;
5138         uint32_t sig_ver;
5139
5140         rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 +
5141                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
5142         sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
5143                                    BNXT_GRCP_WINDOW_2_BASE +
5144                                    offsetof(struct hcomm_status,
5145                                             sig_ver)));
5146         /* If the signature is absent, then FW does not support this feature */
5147         if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) !=
5148             HCOMM_STATUS_SIGNATURE_VAL)
5149                 return 0;
5150
5151         if (!info) {
5152                 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
5153                                    sizeof(*info), 0);
5154                 if (!info)
5155                         return -ENOMEM;
5156                 bp->recovery_info = info;
5157         } else {
5158                 memset(info, 0, sizeof(*info));
5159         }
5160
5161         status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
5162                                       BNXT_GRCP_WINDOW_2_BASE +
5163                                       offsetof(struct hcomm_status,
5164                                                fw_status_loc)));
5165
5166         /* Only pre-map the FW health status GRC register */
5167         if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC)
5168                 return 0;
5169
5170         info->status_regs[BNXT_FW_STATUS_REG] = status_loc;
5171         info->mapped_status_regs[BNXT_FW_STATUS_REG] =
5172                 BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK);
5173
5174         rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 +
5175                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
5176
5177         bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS;
5178
5179         return 0;
5180 }
5181
5182 static int bnxt_init_fw(struct bnxt *bp)
5183 {
5184         uint16_t mtu;
5185         int rc = 0;
5186
5187         bp->fw_cap = 0;
5188
5189         rc = bnxt_map_hcomm_fw_status_reg(bp);
5190         if (rc)
5191                 return rc;
5192
5193         rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT);
5194         if (rc) {
5195                 bnxt_check_fw_status(bp);
5196                 return rc;
5197         }
5198
5199         rc = bnxt_hwrm_func_reset(bp);
5200         if (rc)
5201                 return -EIO;
5202
5203         rc = bnxt_hwrm_vnic_qcaps(bp);
5204         if (rc)
5205                 return rc;
5206
5207         rc = bnxt_hwrm_queue_qportcfg(bp);
5208         if (rc)
5209                 return rc;
5210
5211         /* Get the MAX capabilities for this function.
5212          * This function also allocates context memory for TQM rings and
5213          * informs the firmware about this allocated backing store memory.
5214          */
5215         rc = bnxt_hwrm_func_qcaps(bp);
5216         if (rc)
5217                 return rc;
5218
5219         rc = bnxt_hwrm_func_qcfg(bp, &mtu);
5220         if (rc)
5221                 return rc;
5222
5223         bnxt_hwrm_port_mac_qcfg(bp);
5224
5225         rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp);
5226         if (rc)
5227                 return rc;
5228
5229         bnxt_alloc_error_recovery_info(bp);
5230         /* Get the adapter error recovery support info */
5231         rc = bnxt_hwrm_error_recovery_qcfg(bp);
5232         if (rc)
5233                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
5234
5235         bnxt_hwrm_port_led_qcaps(bp);
5236
5237         return 0;
5238 }
5239
5240 static int
5241 bnxt_init_locks(struct bnxt *bp)
5242 {
5243         int err;
5244
5245         err = pthread_mutex_init(&bp->flow_lock, NULL);
5246         if (err) {
5247                 PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n");
5248                 return err;
5249         }
5250
5251         err = pthread_mutex_init(&bp->def_cp_lock, NULL);
5252         if (err)
5253                 PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n");
5254         return err;
5255 }
5256
5257 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev)
5258 {
5259         int rc;
5260
5261         rc = bnxt_init_fw(bp);
5262         if (rc)
5263                 return rc;
5264
5265         if (!reconfig_dev) {
5266                 rc = bnxt_setup_mac_addr(bp->eth_dev);
5267                 if (rc)
5268                         return rc;
5269         } else {
5270                 rc = bnxt_restore_dflt_mac(bp);
5271                 if (rc)
5272                         return rc;
5273         }
5274
5275         bnxt_config_vf_req_fwd(bp);
5276
5277         rc = bnxt_hwrm_func_driver_register(bp);
5278         if (rc) {
5279                 PMD_DRV_LOG(ERR, "Failed to register driver");
5280                 return -EBUSY;
5281         }
5282
5283         if (BNXT_PF(bp)) {
5284                 if (bp->pdev->max_vfs) {
5285                         rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
5286                         if (rc) {
5287                                 PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
5288                                 return rc;
5289                         }
5290                 } else {
5291                         rc = bnxt_hwrm_allocate_pf_only(bp);
5292                         if (rc) {
5293                                 PMD_DRV_LOG(ERR,
5294                                             "Failed to allocate PF resources");
5295                                 return rc;
5296                         }
5297                 }
5298         }
5299
5300         rc = bnxt_alloc_mem(bp, reconfig_dev);
5301         if (rc)
5302                 return rc;
5303
5304         rc = bnxt_setup_int(bp);
5305         if (rc)
5306                 return rc;
5307
5308         rc = bnxt_request_int(bp);
5309         if (rc)
5310                 return rc;
5311
5312         rc = bnxt_init_ctx_mem(bp);
5313         if (rc) {
5314                 PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n");
5315                 return rc;
5316         }
5317
5318         rc = bnxt_init_locks(bp);
5319         if (rc)
5320                 return rc;
5321
5322         return 0;
5323 }
5324
5325 static int
5326 bnxt_parse_devarg_truflow(__rte_unused const char *key,
5327                           const char *value, void *opaque_arg)
5328 {
5329         struct bnxt *bp = opaque_arg;
5330         unsigned long truflow;
5331         char *end = NULL;
5332
5333         if (!value || !opaque_arg) {
5334                 PMD_DRV_LOG(ERR,
5335                             "Invalid parameter passed to truflow devargs.\n");
5336                 return -EINVAL;
5337         }
5338
5339         truflow = strtoul(value, &end, 10);
5340         if (end == NULL || *end != '\0' ||
5341             (truflow == ULONG_MAX && errno == ERANGE)) {
5342                 PMD_DRV_LOG(ERR,
5343                             "Invalid parameter passed to truflow devargs.\n");
5344                 return -EINVAL;
5345         }
5346
5347         if (BNXT_DEVARG_TRUFLOW_INVALID(truflow)) {
5348                 PMD_DRV_LOG(ERR,
5349                             "Invalid value passed to truflow devargs.\n");
5350                 return -EINVAL;
5351         }
5352
5353         bp->flags |= BNXT_FLAG_TRUFLOW_EN;
5354         if (BNXT_TRUFLOW_EN(bp))
5355                 PMD_DRV_LOG(INFO, "Host-based truflow feature enabled.\n");
5356
5357         return 0;
5358 }
5359
5360 static int
5361 bnxt_parse_devarg_flow_xstat(__rte_unused const char *key,
5362                              const char *value, void *opaque_arg)
5363 {
5364         struct bnxt *bp = opaque_arg;
5365         unsigned long flow_xstat;
5366         char *end = NULL;
5367
5368         if (!value || !opaque_arg) {
5369                 PMD_DRV_LOG(ERR,
5370                             "Invalid parameter passed to flow_xstat devarg.\n");
5371                 return -EINVAL;
5372         }
5373
5374         flow_xstat = strtoul(value, &end, 10);
5375         if (end == NULL || *end != '\0' ||
5376             (flow_xstat == ULONG_MAX && errno == ERANGE)) {
5377                 PMD_DRV_LOG(ERR,
5378                             "Invalid parameter passed to flow_xstat devarg.\n");
5379                 return -EINVAL;
5380         }
5381
5382         if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) {
5383                 PMD_DRV_LOG(ERR,
5384                             "Invalid value passed to flow_xstat devarg.\n");
5385                 return -EINVAL;
5386         }
5387
5388         bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN;
5389         if (BNXT_FLOW_XSTATS_EN(bp))
5390                 PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n");
5391
5392         return 0;
5393 }
5394
5395 static void
5396 bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs)
5397 {
5398         struct rte_kvargs *kvlist;
5399
5400         if (devargs == NULL)
5401                 return;
5402
5403         kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args);
5404         if (kvlist == NULL)
5405                 return;
5406
5407         /*
5408          * Handler for the "truflow" devarg.
5409          * Invoked, for example, as: "-w 0000:00:0d.0,host-based-truflow=1"
5410          */
5411         rte_kvargs_process(kvlist, BNXT_DEVARG_TRUFLOW,
5412                            bnxt_parse_devarg_truflow, bp);
5413
5414         /*
5415          * Handler for the "flow_xstat" devarg.
5416          * Invoked, for example, as: "-w 0000:00:0d.0,flow_xstat=1"
5417          */
5418         rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT,
5419                            bnxt_parse_devarg_flow_xstat, bp);
5420
5421         rte_kvargs_free(kvlist);
5422 }
5423
5424 static int
5425 bnxt_dev_init(struct rte_eth_dev *eth_dev)
5426 {
5427         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
5428         static int version_printed;
5429         struct bnxt *bp;
5430         int rc;
5431
5432         if (version_printed++ == 0)
5433                 PMD_DRV_LOG(INFO, "%s\n", bnxt_version);
5434
5435         eth_dev->dev_ops = &bnxt_dev_ops;
5436         eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
5437         eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
5438
5439         /*
5440          * For secondary processes, we don't initialise any further
5441          * as primary has already done this work.
5442          */
5443         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
5444                 return 0;
5445
5446         rte_eth_copy_pci_info(eth_dev, pci_dev);
5447
5448         bp = eth_dev->data->dev_private;
5449
5450         /* Parse dev arguments passed on when starting the DPDK application. */
5451         bnxt_parse_dev_args(bp, pci_dev->device.devargs);
5452
5453         bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
5454
5455         if (bnxt_vf_pciid(pci_dev->id.device_id))
5456                 bp->flags |= BNXT_FLAG_VF;
5457
5458         if (bnxt_thor_device(pci_dev->id.device_id))
5459                 bp->flags |= BNXT_FLAG_THOR_CHIP;
5460
5461         if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 ||
5462             pci_dev->id.device_id == BROADCOM_DEV_ID_58804 ||
5463             pci_dev->id.device_id == BROADCOM_DEV_ID_58808 ||
5464             pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF)
5465                 bp->flags |= BNXT_FLAG_STINGRAY;
5466
5467         rc = bnxt_init_board(eth_dev);
5468         if (rc) {
5469                 PMD_DRV_LOG(ERR,
5470                             "Failed to initialize board rc: %x\n", rc);
5471                 return rc;
5472         }
5473
5474         rc = bnxt_alloc_pf_info(bp);
5475         if (rc)
5476                 goto error_free;
5477
5478         rc = bnxt_alloc_link_info(bp);
5479         if (rc)
5480                 goto error_free;
5481
5482         rc = bnxt_alloc_hwrm_resources(bp);
5483         if (rc) {
5484                 PMD_DRV_LOG(ERR,
5485                             "Failed to allocate hwrm resource rc: %x\n", rc);
5486                 goto error_free;
5487         }
5488         rc = bnxt_alloc_leds_info(bp);
5489         if (rc)
5490                 goto error_free;
5491
5492         rc = bnxt_alloc_cos_queues(bp);
5493         if (rc)
5494                 goto error_free;
5495
5496         rc = bnxt_init_resources(bp, false);
5497         if (rc)
5498                 goto error_free;
5499
5500         rc = bnxt_alloc_stats_mem(bp);
5501         if (rc)
5502                 goto error_free;
5503
5504         /* Tell rte_eth_dev_close() that it should also release the
5505          * private port resources.
5506          */
5507         eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
5508
5509         PMD_DRV_LOG(INFO,
5510                     DRV_MODULE_NAME " found at mem %" PRIX64 ", node addr %p\n",
5511                     pci_dev->mem_resource[0].phys_addr,
5512                     pci_dev->mem_resource[0].addr);
5513
5514         return 0;
5515
5516 error_free:
5517         bnxt_dev_uninit(eth_dev);
5518         return rc;
5519 }
5520
5521
5522 static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx)
5523 {
5524         if (!ctx)
5525                 return;
5526
5527         if (ctx->va)
5528                 rte_free(ctx->va);
5529
5530         ctx->va = NULL;
5531         ctx->dma = RTE_BAD_IOVA;
5532         ctx->ctx_id = BNXT_CTX_VAL_INVAL;
5533 }
5534
5535 static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp)
5536 {
5537         bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
5538                                   CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
5539                                   bp->flow_stat->rx_fc_out_tbl.ctx_id,
5540                                   bp->flow_stat->max_fc,
5541                                   false);
5542
5543         bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
5544                                   CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
5545                                   bp->flow_stat->tx_fc_out_tbl.ctx_id,
5546                                   bp->flow_stat->max_fc,
5547                                   false);
5548
5549         if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
5550                 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id);
5551         bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
5552
5553         if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
5554                 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id);
5555         bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
5556
5557         if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
5558                 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id);
5559         bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
5560
5561         if (bp->flow_stat->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
5562                 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id);
5563         bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
5564 }
5565
5566 static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp)
5567 {
5568         bnxt_unregister_fc_ctx_mem(bp);
5569
5570         bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl);
5571         bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl);
5572         bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl);
5573         bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl);
5574 }
5575
5576 static void bnxt_uninit_ctx_mem(struct bnxt *bp)
5577 {
5578         if (BNXT_FLOW_XSTATS_EN(bp))
5579                 bnxt_uninit_fc_ctx_mem(bp);
5580 }
5581
5582 static void
5583 bnxt_free_error_recovery_info(struct bnxt *bp)
5584 {
5585         rte_free(bp->recovery_info);
5586         bp->recovery_info = NULL;
5587         bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
5588 }
5589
5590 static void
5591 bnxt_uninit_locks(struct bnxt *bp)
5592 {
5593         pthread_mutex_destroy(&bp->flow_lock);
5594         pthread_mutex_destroy(&bp->def_cp_lock);
5595 }
5596
5597 static int
5598 bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
5599 {
5600         int rc;
5601
5602         bnxt_free_int(bp);
5603         bnxt_free_mem(bp, reconfig_dev);
5604         bnxt_hwrm_func_buf_unrgtr(bp);
5605         rc = bnxt_hwrm_func_driver_unregister(bp, 0);
5606         bp->flags &= ~BNXT_FLAG_REGISTERED;
5607         bnxt_free_ctx_mem(bp);
5608         if (!reconfig_dev) {
5609                 bnxt_free_hwrm_resources(bp);
5610                 bnxt_free_error_recovery_info(bp);
5611         }
5612
5613         bnxt_uninit_ctx_mem(bp);
5614
5615         bnxt_uninit_locks(bp);
5616         rte_free(bp->ptp_cfg);
5617         bp->ptp_cfg = NULL;
5618         return rc;
5619 }
5620
5621 static int
5622 bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
5623 {
5624         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
5625                 return -EPERM;
5626
5627         PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");
5628
5629         if (eth_dev->state != RTE_ETH_DEV_UNUSED)
5630                 bnxt_dev_close_op(eth_dev);
5631
5632         return 0;
5633 }
5634
5635 static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
5636         struct rte_pci_device *pci_dev)
5637 {
5638         return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt),
5639                 bnxt_dev_init);
5640 }
5641
5642 static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
5643 {
5644         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
5645                 return rte_eth_dev_pci_generic_remove(pci_dev,
5646                                 bnxt_dev_uninit);
5647         else
5648                 return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
5649 }
5650
5651 static struct rte_pci_driver bnxt_rte_pmd = {
5652         .id_table = bnxt_pci_id_map,
5653         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
5654         .probe = bnxt_pci_probe,
5655         .remove = bnxt_pci_remove,
5656 };
5657
5658 static bool
5659 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
5660 {
5661         if (strcmp(dev->device->driver->name, drv->driver.name))
5662                 return false;
5663
5664         return true;
5665 }
5666
5667 bool is_bnxt_supported(struct rte_eth_dev *dev)
5668 {
5669         return is_device_supported(dev, &bnxt_rte_pmd);
5670 }
5671
5672 RTE_INIT(bnxt_init_log)
5673 {
5674         bnxt_logtype_driver = rte_log_register("pmd.net.bnxt.driver");
5675         if (bnxt_logtype_driver >= 0)
5676                 rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE);
5677 }
5678
5679 RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
5680 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
5681 RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");