1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2018 Broadcom
3  * All rights reserved.
4  */
5
6 #include <inttypes.h>
7 #include <stdbool.h>
8
9 #include <rte_dev.h>
10 #include <rte_ethdev_driver.h>
11 #include <rte_ethdev_pci.h>
12 #include <rte_malloc.h>
13 #include <rte_cycles.h>
14
15 #include "bnxt.h"
16 #include "bnxt_cpr.h"
17 #include "bnxt_filter.h"
18 #include "bnxt_hwrm.h"
19 #include "bnxt_irq.h"
20 #include "bnxt_ring.h"
21 #include "bnxt_rxq.h"
22 #include "bnxt_rxr.h"
23 #include "bnxt_stats.h"
24 #include "bnxt_txq.h"
25 #include "bnxt_txr.h"
26 #include "bnxt_vnic.h"
27 #include "hsi_struct_def_dpdk.h"
28 #include "bnxt_nvm_defs.h"
29
30 #define DRV_MODULE_NAME         "bnxt"
31 static const char bnxt_version[] =
32         "Broadcom Cumulus driver " DRV_MODULE_NAME "\n";
33 int bnxt_logtype_driver;
34
35 #define PCI_VENDOR_ID_BROADCOM 0x14E4
36
37 #define BROADCOM_DEV_ID_STRATUS_NIC_VF 0x1609
38 #define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
39 #define BROADCOM_DEV_ID_57414_VF 0x16c1
40 #define BROADCOM_DEV_ID_57301 0x16c8
41 #define BROADCOM_DEV_ID_57302 0x16c9
42 #define BROADCOM_DEV_ID_57304_PF 0x16ca
43 #define BROADCOM_DEV_ID_57304_VF 0x16cb
44 #define BROADCOM_DEV_ID_57417_MF 0x16cc
45 #define BROADCOM_DEV_ID_NS2 0x16cd
46 #define BROADCOM_DEV_ID_57311 0x16ce
47 #define BROADCOM_DEV_ID_57312 0x16cf
48 #define BROADCOM_DEV_ID_57402 0x16d0
49 #define BROADCOM_DEV_ID_57404 0x16d1
50 #define BROADCOM_DEV_ID_57406_PF 0x16d2
51 #define BROADCOM_DEV_ID_57406_VF 0x16d3
52 #define BROADCOM_DEV_ID_57402_MF 0x16d4
53 #define BROADCOM_DEV_ID_57407_RJ45 0x16d5
54 #define BROADCOM_DEV_ID_57412 0x16d6
55 #define BROADCOM_DEV_ID_57414 0x16d7
56 #define BROADCOM_DEV_ID_57416_RJ45 0x16d8
57 #define BROADCOM_DEV_ID_57417_RJ45 0x16d9
58 #define BROADCOM_DEV_ID_5741X_VF 0x16dc
59 #define BROADCOM_DEV_ID_57412_MF 0x16de
60 #define BROADCOM_DEV_ID_57314 0x16df
61 #define BROADCOM_DEV_ID_57317_RJ45 0x16e0
62 #define BROADCOM_DEV_ID_5731X_VF 0x16e1
63 #define BROADCOM_DEV_ID_57417_SFP 0x16e2
64 #define BROADCOM_DEV_ID_57416_SFP 0x16e3
65 #define BROADCOM_DEV_ID_57317_SFP 0x16e4
66 #define BROADCOM_DEV_ID_57404_MF 0x16e7
67 #define BROADCOM_DEV_ID_57406_MF 0x16e8
68 #define BROADCOM_DEV_ID_57407_SFP 0x16e9
69 #define BROADCOM_DEV_ID_57407_MF 0x16ea
70 #define BROADCOM_DEV_ID_57414_MF 0x16ec
71 #define BROADCOM_DEV_ID_57416_MF 0x16ee
72 #define BROADCOM_DEV_ID_58802 0xd802
73 #define BROADCOM_DEV_ID_58804 0xd804
74 #define BROADCOM_DEV_ID_58808 0x16f0
75
76 static const struct rte_pci_id bnxt_pci_id_map[] = {
77         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
78                          BROADCOM_DEV_ID_STRATUS_NIC_VF) },
79         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
80         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
81         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
82         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
83         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
84         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
85         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
86         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
87         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
88         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
89         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
90         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
91         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
92         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
93         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
94         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
95         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
96         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
97         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
98         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
99         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
100         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
101         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
102         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
103         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
104         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
105         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
106         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
107         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
108         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
109         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
110         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
111         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
112         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
113         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
114         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
115         { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
116         { .vendor_id = 0, /* sentinel */ },
117 };
118
119 #define BNXT_ETH_RSS_SUPPORT (  \
120         ETH_RSS_IPV4 |          \
121         ETH_RSS_NONFRAG_IPV4_TCP |      \
122         ETH_RSS_NONFRAG_IPV4_UDP |      \
123         ETH_RSS_IPV6 |          \
124         ETH_RSS_NONFRAG_IPV6_TCP |      \
125         ETH_RSS_NONFRAG_IPV6_UDP)
126
127 #define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_VLAN_INSERT | \
128                                      DEV_TX_OFFLOAD_IPV4_CKSUM | \
129                                      DEV_TX_OFFLOAD_TCP_CKSUM | \
130                                      DEV_TX_OFFLOAD_UDP_CKSUM | \
131                                      DEV_TX_OFFLOAD_TCP_TSO | \
132                                      DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
133                                      DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
134                                      DEV_TX_OFFLOAD_GRE_TNL_TSO | \
135                                      DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
136                                      DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
137                                      DEV_TX_OFFLOAD_MULTI_SEGS)
138
139 #define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
140                                      DEV_RX_OFFLOAD_VLAN_STRIP | \
141                                      DEV_RX_OFFLOAD_IPV4_CKSUM | \
142                                      DEV_RX_OFFLOAD_UDP_CKSUM | \
143                                      DEV_RX_OFFLOAD_TCP_CKSUM | \
144                                      DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
145                                      DEV_RX_OFFLOAD_JUMBO_FRAME | \
146                                      DEV_RX_OFFLOAD_CRC_STRIP | \
147                                      DEV_RX_OFFLOAD_TCP_LRO)
148
149 static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
150 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
151
152 /***********************/
153
154 /*
155  * High level utility functions
156  */
157
158 static void bnxt_free_mem(struct bnxt *bp)
159 {
160         bnxt_free_filter_mem(bp);
161         bnxt_free_vnic_attributes(bp);
162         bnxt_free_vnic_mem(bp);
163
164         bnxt_free_stats(bp);
165         bnxt_free_tx_rings(bp);
166         bnxt_free_rx_rings(bp);
167         bnxt_free_def_cp_ring(bp);
168 }
169
170 static int bnxt_alloc_mem(struct bnxt *bp)
171 {
172         int rc;
173
174         /* Default completion ring */
175         rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY);
176         if (rc)
177                 goto alloc_mem_err;
178
179         rc = bnxt_alloc_rings(bp, 0, NULL, NULL,
180                               bp->def_cp_ring, "def_cp");
181         if (rc)
182                 goto alloc_mem_err;
183
184         rc = bnxt_alloc_vnic_mem(bp);
185         if (rc)
186                 goto alloc_mem_err;
187
188         rc = bnxt_alloc_vnic_attributes(bp);
189         if (rc)
190                 goto alloc_mem_err;
191
192         rc = bnxt_alloc_filter_mem(bp);
193         if (rc)
194                 goto alloc_mem_err;
195
196         return 0;
197
198 alloc_mem_err:
199         bnxt_free_mem(bp);
200         return rc;
201 }
202
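/*
 * bnxt_init_chip() below brings the device to an operational state: it
 * allocates HWRM stat contexts, rings and ring groups, programs the RX
 * multi-queue mode, configures each VNIC (context, filters, RSS,
 * placement and TPA/LRO), sets the L2 RX mask, wires up the RX queue
 * interrupt vectors and finally applies the link configuration.
 */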
203 static int bnxt_init_chip(struct bnxt *bp)
204 {
205         unsigned int i;
206         struct rte_eth_link new;
207         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
208         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
209         uint32_t intr_vector = 0;
210         uint32_t queue_id, base = BNXT_MISC_VEC_ID;
211         uint32_t vec = BNXT_MISC_VEC_ID;
212         int rc;
213
214         /* disable uio/vfio intr/eventfd mapping */
215         rte_intr_disable(intr_handle);
216
217         if (bp->eth_dev->data->mtu > ETHER_MTU) {
218                 bp->eth_dev->data->dev_conf.rxmode.offloads |=
219                         DEV_RX_OFFLOAD_JUMBO_FRAME;
220                 bp->flags |= BNXT_FLAG_JUMBO;
221         } else {
222                 bp->eth_dev->data->dev_conf.rxmode.offloads &=
223                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
224                 bp->flags &= ~BNXT_FLAG_JUMBO;
225         }
226
227         rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
228         if (rc) {
229                 PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
230                 goto err_out;
231         }
232
233         rc = bnxt_alloc_hwrm_rings(bp);
234         if (rc) {
235                 PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
236                 goto err_out;
237         }
238
239         rc = bnxt_alloc_all_hwrm_ring_grps(bp);
240         if (rc) {
241                 PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
242                 goto err_out;
243         }
244
245         rc = bnxt_mq_rx_configure(bp);
246         if (rc) {
247                 PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
248                 goto err_out;
249         }
250
251         /* VNIC configuration */
252         for (i = 0; i < bp->nr_vnics; i++) {
253                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
254
255                 rc = bnxt_hwrm_vnic_alloc(bp, vnic);
256                 if (rc) {
257                         PMD_DRV_LOG(ERR, "HWRM vnic %d alloc failure rc: %x\n",
258                                 i, rc);
259                         goto err_out;
260                 }
261
262                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
263                 if (rc) {
264                         PMD_DRV_LOG(ERR,
265                                 "HWRM vnic %d ctx alloc failure rc: %x\n",
266                                 i, rc);
267                         goto err_out;
268                 }
269
270                 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
271                 if (rc) {
272                         PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
273                                 i, rc);
274                         goto err_out;
275                 }
276
277                 rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
278                 if (rc) {
279                         PMD_DRV_LOG(ERR,
280                                 "HWRM vnic %d filter failure rc: %x\n",
281                                 i, rc);
282                         goto err_out;
283                 }
284
285                 rc = bnxt_vnic_rss_configure(bp, vnic);
286                 if (rc) {
287                         PMD_DRV_LOG(ERR,
288                                     "HWRM vnic set RSS failure rc: %x\n", rc);
289                         goto err_out;
290                 }
291
292                 bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
293
294                 if (bp->eth_dev->data->dev_conf.rxmode.offloads &
295                     DEV_RX_OFFLOAD_TCP_LRO)
296                         bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
297                 else
298                         bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
299         }
300         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
301         if (rc) {
302                 PMD_DRV_LOG(ERR,
303                         "HWRM cfa l2 rx mask failure rc: %x\n", rc);
304                 goto err_out;
305         }
306
307         /* check and configure queue intr-vector mapping */
308         if ((rte_intr_cap_multiple(intr_handle) ||
309              !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
310             bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
311                 intr_vector = bp->eth_dev->data->nb_rx_queues;
312                 PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
313                 if (intr_vector > bp->rx_cp_nr_rings) {
314                         PMD_DRV_LOG(ERR, "At most %d intr queues supported\n",
315                                         bp->rx_cp_nr_rings);
316                         return -ENOTSUP;
317                 }
318                 if (rte_intr_efd_enable(intr_handle, intr_vector))
319                         return -1;
320         }
321
322         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
323                 intr_handle->intr_vec =
324                         rte_zmalloc("intr_vec",
325                                     bp->eth_dev->data->nb_rx_queues *
326                                     sizeof(int), 0);
327                 if (intr_handle->intr_vec == NULL) {
328                         PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
329                                 " intr_vec", bp->eth_dev->data->nb_rx_queues);
330                         return -ENOMEM;
331                 }
332                 PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
333                         "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
334                          intr_handle->intr_vec, intr_handle->nb_efd,
335                         intr_handle->max_intr);
336         }
337
338         for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
339              queue_id++) {
340                 intr_handle->intr_vec[queue_id] = vec;
341                 if (vec < base + intr_handle->nb_efd - 1)
342                         vec++;
343         }
344
345         /* enable uio/vfio intr/eventfd mapping */
346         rte_intr_enable(intr_handle);
347
348         rc = bnxt_get_hwrm_link_config(bp, &new);
349         if (rc) {
350                 PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc);
351                 goto err_out;
352         }
353
354         if (!bp->link_info.link_up) {
355                 rc = bnxt_set_hwrm_link_config(bp, true);
356                 if (rc) {
357                         PMD_DRV_LOG(ERR,
358                                 "HWRM link config failure rc: %x\n", rc);
359                         goto err_out;
360                 }
361         }
362         bnxt_print_link_info(bp->eth_dev);
363
364         return 0;
365
366 err_out:
367         bnxt_free_all_hwrm_resources(bp);
368
369         /* Some error status codes returned by FW are not errno.h values */
370         if (rc > 0)
371                 rc = -EIO;
372
373         return rc;
374 }
375
376 static int bnxt_shutdown_nic(struct bnxt *bp)
377 {
378         bnxt_free_all_hwrm_resources(bp);
379         bnxt_free_all_filters(bp);
380         bnxt_free_all_vnics(bp);
381         return 0;
382 }
383
384 static int bnxt_init_nic(struct bnxt *bp)
385 {
386         int rc;
387
388         rc = bnxt_init_ring_grps(bp);
389         if (rc)
390                 return rc;
391
392         bnxt_init_vnics(bp);
393         bnxt_init_filters(bp);
394
395         rc = bnxt_init_chip(bp);
396         if (rc)
397                 return rc;
398
399         return 0;
400 }
401
402 /*
403  * Device configuration and status function
404  */
405
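/*
 * bnxt_dev_info_get_op() reports device capabilities: the queue counts
 * are bounded by the most constrained HW resource (VNICs, L2 contexts,
 * RSS contexts and stat contexts), and the VMDq loop below selects a
 * pool/queue combination the available VNICs and RX queues can support.
 */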
406 static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
407                                   struct rte_eth_dev_info *dev_info)
408 {
409         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
410         uint16_t max_vnics, i, j, vpool, vrxq;
411         unsigned int max_rx_rings;
412
413         /* MAC Specifics */
414         dev_info->max_mac_addrs = bp->max_l2_ctx;
415         dev_info->max_hash_mac_addrs = 0;
416
417         /* PF/VF specifics */
418         if (BNXT_PF(bp))
419                 dev_info->max_vfs = bp->pdev->max_vfs;
420         max_rx_rings = RTE_MIN(bp->max_vnics, RTE_MIN(bp->max_l2_ctx,
421                                                 RTE_MIN(bp->max_rsscos_ctx,
422                                                 bp->max_stat_ctx)));
423         /* For the sake of symmetry, max_rx_queues = max_tx_queues */
424         dev_info->max_rx_queues = max_rx_rings;
425         dev_info->max_tx_queues = max_rx_rings;
426         dev_info->reta_size = bp->max_rsscos_ctx;
427         dev_info->hash_key_size = 40;
428         max_vnics = bp->max_vnics;
429
430         /* Fast path specifics */
431         dev_info->min_rx_bufsize = 1;
432         dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
433                                   + VLAN_TAG_SIZE;
434
435         dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
436         if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
437                 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
438         dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
439         dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
440
441         /* *INDENT-OFF* */
442         dev_info->default_rxconf = (struct rte_eth_rxconf) {
443                 .rx_thresh = {
444                         .pthresh = 8,
445                         .hthresh = 8,
446                         .wthresh = 0,
447                 },
448                 .rx_free_thresh = 32,
449                 /* If no descriptors available, pkts are dropped by default */
450                 .rx_drop_en = 1,
451         };
452
453         dev_info->default_txconf = (struct rte_eth_txconf) {
454                 .tx_thresh = {
455                         .pthresh = 32,
456                         .hthresh = 0,
457                         .wthresh = 0,
458                 },
459                 .tx_free_thresh = 32,
460                 .tx_rs_thresh = 32,
461         };
462         eth_dev->data->dev_conf.intr_conf.lsc = 1;
463
464         eth_dev->data->dev_conf.intr_conf.rxq = 1;
465
466         /* *INDENT-ON* */
467
468         /*
469          * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
470          *       need further investigation.
471          */
472
473         /* VMDq resources */
474         vpool = 64; /* ETH_64_POOLS */
475         vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
476         for (i = 0; i < 4; vpool >>= 1, i++) {
477                 if (max_vnics > vpool) {
478                         for (j = 0; j < 5; vrxq >>= 1, j++) {
479                                 if (dev_info->max_rx_queues > vrxq) {
480                                         if (vpool > vrxq)
481                                                 vpool = vrxq;
482                                         goto found;
483                                 }
484                         }
485                         /* Not enough resources to support VMDq */
486                         break;
487                 }
488         }
489         /* Not enough resources to support VMDq */
490         vpool = 0;
491         vrxq = 0;
492 found:
493         dev_info->max_vmdq_pools = vpool;
494         dev_info->vmdq_queue_num = vrxq;
495
496         dev_info->vmdq_pool_base = 0;
497         dev_info->vmdq_queue_base = 0;
498 }
499
500 /* Configure the device based on the configuration provided */
501 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
502 {
503         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
504         uint64_t tx_offloads = eth_dev->data->dev_conf.txmode.offloads;
505         uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
506
507         if (tx_offloads != (tx_offloads & BNXT_DEV_TX_OFFLOAD_SUPPORT)) {
508                 PMD_DRV_LOG
509                         (ERR,
510                          "Tx offloads requested 0x%" PRIx64 " supported 0x%x\n",
511                          tx_offloads, BNXT_DEV_TX_OFFLOAD_SUPPORT);
512                 return -ENOTSUP;
513         }
514
515         if (rx_offloads != (rx_offloads & BNXT_DEV_RX_OFFLOAD_SUPPORT)) {
516                 PMD_DRV_LOG
517                         (ERR,
518                          "Rx offloads requested 0x%" PRIx64 " supported 0x%x\n",
519                             rx_offloads, BNXT_DEV_RX_OFFLOAD_SUPPORT);
520                 return -ENOTSUP;
521         }
522
523         bp->rx_queues = (void *)eth_dev->data->rx_queues;
524         bp->tx_queues = (void *)eth_dev->data->tx_queues;
525
526         /* Inherit new configurations */
527         if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
528             eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
529             eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues + 1 >
530             bp->max_cp_rings ||
531             eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
532             bp->max_stat_ctx ||
533             (uint32_t)(eth_dev->data->nb_rx_queues + 1) > bp->max_ring_grps) {
534                 PMD_DRV_LOG(ERR,
535                         "Insufficient resources to support requested config\n");
536                 PMD_DRV_LOG(ERR,
537                         "Num Queues Requested: Tx %d, Rx %d\n",
538                         eth_dev->data->nb_tx_queues,
539                         eth_dev->data->nb_rx_queues);
540                 PMD_DRV_LOG(ERR,
541                         "Res available: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d\n",
542                         bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
543                         bp->max_stat_ctx, bp->max_ring_grps);
544                 return -ENOSPC;
545         }
546
547         bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
548         bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
549         bp->rx_cp_nr_rings = bp->rx_nr_rings;
550         bp->tx_cp_nr_rings = bp->tx_nr_rings;
551
552         if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
553                 eth_dev->data->mtu =
554                                 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
555                                 ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
556         return 0;
557 }
558
559 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
560 {
561         struct rte_eth_link *link = &eth_dev->data->dev_link;
562
563         if (link->link_status)
564                 PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
565                         eth_dev->data->port_id,
566                         (uint32_t)link->link_speed,
567                         (link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
568                         ("full-duplex") : ("half-duplex"));
569         else
570                 PMD_DRV_LOG(INFO, "Port %d Link Down\n",
571                         eth_dev->data->port_id);
572 }
573
574 static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
575 {
576         bnxt_print_link_info(eth_dev);
577         return 0;
578 }
579
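/*
 * bnxt_dev_start_op(): bring the port up. It initializes ring groups,
 * VNICs and filters via bnxt_init_nic(), refreshes the link state and
 * then applies the VLAN filter/strip settings derived from the
 * configured RX offloads. On failure the partially initialized HW
 * state and any posted mbufs are released.
 */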
580 static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
581 {
582         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
583         uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
584         int vlan_mask = 0;
585         int rc;
586
587         if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
588                 PMD_DRV_LOG(ERR,
589                         "RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
590                         bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
591         }
592         bp->dev_stopped = 0;
593
594         rc = bnxt_init_nic(bp);
595         if (rc)
596                 goto error;
597
598         bnxt_link_update_op(eth_dev, 1);
599
600         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
601                 vlan_mask |= ETH_VLAN_FILTER_MASK;
602         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
603                 vlan_mask |= ETH_VLAN_STRIP_MASK;
604         rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
605         if (rc)
606                 goto error;
607
608         bp->flags |= BNXT_FLAG_INIT_DONE;
609         return 0;
610
611 error:
612         bnxt_shutdown_nic(bp);
613         bnxt_free_tx_mbufs(bp);
614         bnxt_free_rx_mbufs(bp);
615         return rc;
616 }
617
618 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
619 {
620         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
621         int rc = 0;
622
623         if (!bp->link_info.link_up)
624                 rc = bnxt_set_hwrm_link_config(bp, true);
625         if (!rc)
626                 eth_dev->data->dev_link.link_status = 1;
627
628         bnxt_print_link_info(eth_dev);
629         return 0;
630 }
631
632 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
633 {
634         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
635
636         eth_dev->data->dev_link.link_status = 0;
637         bnxt_set_hwrm_link_config(bp, false);
638         bp->link_info.link_up = 0;
639
640         return 0;
641 }
642
643 /* Unload the driver, release resources */
644 static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
645 {
646         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
647
648         if (bp->eth_dev->data->dev_started) {
649                 /* TBD: STOP HW queues DMA */
650                 eth_dev->data->dev_link.link_status = 0;
651         }
652         bnxt_set_hwrm_link_config(bp, false);
653         bnxt_hwrm_port_clr_stats(bp);
654         bp->flags &= ~BNXT_FLAG_INIT_DONE;
655         bnxt_shutdown_nic(bp);
656         bp->dev_stopped = 1;
657 }
658
659 static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
660 {
661         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
662
663         if (bp->dev_stopped == 0)
664                 bnxt_dev_stop_op(eth_dev);
665
666         bnxt_free_tx_mbufs(bp);
667         bnxt_free_rx_mbufs(bp);
668         bnxt_free_mem(bp);
669         if (eth_dev->data->mac_addrs != NULL) {
670                 rte_free(eth_dev->data->mac_addrs);
671                 eth_dev->data->mac_addrs = NULL;
672         }
673         if (bp->grp_info != NULL) {
674                 rte_free(bp->grp_info);
675                 bp->grp_info = NULL;
676         }
677 }
678
679 static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
680                                     uint32_t index)
681 {
682         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
683         uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
684         struct bnxt_vnic_info *vnic;
685         struct bnxt_filter_info *filter, *temp_filter;
686         uint32_t pool = RTE_MIN(MAX_FF_POOLS, ETH_64_POOLS);
687         uint32_t i;
688
689         /*
690          * Loop through all VNICs from the specified filter flow pools to
691          * remove the corresponding MAC addr filter
692          */
693         for (i = 0; i < pool; i++) {
694                 if (!(pool_mask & (1ULL << i)))
695                         continue;
696
697                 STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
698                         filter = STAILQ_FIRST(&vnic->filter);
699                         while (filter) {
700                                 temp_filter = STAILQ_NEXT(filter, next);
701                                 if (filter->mac_index == index) {
702                                         STAILQ_REMOVE(&vnic->filter, filter,
703                                                       bnxt_filter_info, next);
704                                         bnxt_hwrm_clear_l2_filter(bp, filter);
705                                         filter->mac_index = INVALID_MAC_INDEX;
706                                         memset(&filter->l2_addr, 0,
707                                                ETHER_ADDR_LEN);
708                                         STAILQ_INSERT_TAIL(
709                                                         &bp->free_filter_list,
710                                                         filter, next);
711                                 }
712                                 filter = temp_filter;
713                         }
714                 }
715         }
716 }
717
718 static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
719                                 struct ether_addr *mac_addr,
720                                 uint32_t index, uint32_t pool)
721 {
722         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
723         struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
724         struct bnxt_filter_info *filter;
725
726         if (BNXT_VF(bp)) {
727                 PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
728                 return -ENOTSUP;
729         }
730
731         if (!vnic) {
732                 PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
733                 return -EINVAL;
734         }
735         /* Attach requested MAC address to the new l2_filter */
736         STAILQ_FOREACH(filter, &vnic->filter, next) {
737                 if (filter->mac_index == index) {
738                         PMD_DRV_LOG(ERR,
739                                 "MAC addr already exists for pool %d\n", pool);
740                         return 0;
741                 }
742         }
743         filter = bnxt_alloc_filter(bp);
744         if (!filter) {
745                 PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
746                 return -ENODEV;
747         }
748         STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
749         filter->mac_index = index;
750         memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
751         return bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
752 }
753
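/*
 * bnxt_link_update_op(): query the link state from firmware. When
 * wait_to_complete is set, poll up to BNXT_LINK_WAIT_CNT times with a
 * BNXT_LINK_WAIT_INTERVAL delay until the link comes up; the cached
 * rte_eth_link is only rewritten when the status or speed changed.
 */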
754 int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
755 {
756         int rc = 0;
757         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
758         struct rte_eth_link new;
759         unsigned int cnt = BNXT_LINK_WAIT_CNT;
760
761         memset(&new, 0, sizeof(new));
762         do {
763                 /* Retrieve link info from hardware */
764                 rc = bnxt_get_hwrm_link_config(bp, &new);
765                 if (rc) {
766                         new.link_speed = ETH_LINK_SPEED_100M;
767                         new.link_duplex = ETH_LINK_FULL_DUPLEX;
768                         PMD_DRV_LOG(ERR,
769                                 "Failed to retrieve link rc = 0x%x!\n", rc);
770                         goto out;
771                 }
772                 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
773
774                 if (!wait_to_complete)
775                         break;
776         } while (!new.link_status && cnt--);
777
778 out:
779         /* Timed out or success */
780         if (new.link_status != eth_dev->data->dev_link.link_status ||
781             new.link_speed != eth_dev->data->dev_link.link_speed) {
782                 memcpy(&eth_dev->data->dev_link, &new,
783                         sizeof(struct rte_eth_link));
784                 bnxt_print_link_info(eth_dev);
785         }
786
787         return rc;
788 }
789
790 static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
791 {
792         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
793         struct bnxt_vnic_info *vnic;
794
795         if (bp->vnic_info == NULL)
796                 return;
797
798         vnic = &bp->vnic_info[0];
799
800         vnic->flags |= BNXT_VNIC_INFO_PROMISC;
801         bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
802 }
803
804 static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
805 {
806         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
807         struct bnxt_vnic_info *vnic;
808
809         if (bp->vnic_info == NULL)
810                 return;
811
812         vnic = &bp->vnic_info[0];
813
814         vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
815         bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
816 }
817
818 static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
819 {
820         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
821         struct bnxt_vnic_info *vnic;
822
823         if (bp->vnic_info == NULL)
824                 return;
825
826         vnic = &bp->vnic_info[0];
827
828         vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
829         bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
830 }
831
832 static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
833 {
834         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
835         struct bnxt_vnic_info *vnic;
836
837         if (bp->vnic_info == NULL)
838                 return;
839
840         vnic = &bp->vnic_info[0];
841
842         vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
843         bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
844 }
845
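/*
 * RETA (RSS redirection table) ops: the table size exposed to the
 * application must match HW_HASH_INDEX_SIZE exactly; an update is
 * fanned out to every RSS-capable VNIC in the filter flow pools.
 */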
846 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
847                             struct rte_eth_rss_reta_entry64 *reta_conf,
848                             uint16_t reta_size)
849 {
850         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
851         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
852         struct bnxt_vnic_info *vnic;
853         int i;
854
855         if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
856                 return -EINVAL;
857
858         if (reta_size != HW_HASH_INDEX_SIZE) {
859                 PMD_DRV_LOG(ERR, "The configured hash table lookup size "
860                         "(%d) must equal the size supported by the hardware "
861                         "(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
862                 return -EINVAL;
863         }
864         /* Update the RSS VNIC(s) */
865         for (i = 0; i < MAX_FF_POOLS; i++) {
866                 STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
867                         memcpy(vnic->rss_table, reta_conf, reta_size);
868
869                         bnxt_hwrm_vnic_rss_cfg(bp, vnic);
870                 }
871         }
872         return 0;
873 }
874
875 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
876                               struct rte_eth_rss_reta_entry64 *reta_conf,
877                               uint16_t reta_size)
878 {
879         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
880         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
881         struct rte_intr_handle *intr_handle
882                 = &bp->pdev->intr_handle;
883
884         /* Retrieve from the default VNIC */
885         if (!vnic)
886                 return -EINVAL;
887         if (!vnic->rss_table)
888                 return -EINVAL;
889
890         if (reta_size != HW_HASH_INDEX_SIZE) {
891                 PMD_DRV_LOG(ERR, "The configured hash table lookup size "
892                         "(%d) must equal the size supported by the hardware "
893                         "(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
894                 return -EINVAL;
895         }
896         /* EW - need to revisit here copying from uint64_t to uint16_t */
897         memcpy(reta_conf, vnic->rss_table, reta_size);
898
899         if (rte_intr_allow_others(intr_handle)) {
900                 if (eth_dev->data->dev_conf.intr_conf.lsc != 0)
901                         bnxt_dev_lsc_intr_setup(eth_dev);
902         }
903
904         return 0;
905 }
906
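/*
 * bnxt_rss_hash_update_op(): translate the rte_eth rss_hf bits into
 * HWRM_VNIC_RSS_CFG hash-type flags, optionally install a new hash key
 * (when its length fits HW_HASH_KEY_SIZE), and push the result to each
 * VNIC via bnxt_hwrm_vnic_rss_cfg().
 */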
907 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
908                                    struct rte_eth_rss_conf *rss_conf)
909 {
910         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
911         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
912         struct bnxt_vnic_info *vnic;
913         uint16_t hash_type = 0;
914         int i;
915
916         /*
917          * If the RSS enablement requested here differs from what was
918          * set in dev_configure, return -EINVAL.
919          */
920         if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
921                 if (!rss_conf->rss_hf)
922                         PMD_DRV_LOG(ERR, "Hash type NONE\n");
923         } else {
924                 if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
925                         return -EINVAL;
926         }
927
928         bp->flags |= BNXT_FLAG_UPDATE_HASH;
929         memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf));
930
931         if (rss_conf->rss_hf & ETH_RSS_IPV4)
932                 hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
933         if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
934                 hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
935         if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
936                 hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
937         if (rss_conf->rss_hf & ETH_RSS_IPV6)
938                 hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
939         if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
940                 hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
941         if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
942                 hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
943
944         /* Update the RSS VNIC(s) */
945         for (i = 0; i < MAX_FF_POOLS; i++) {
946                 STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
947                         vnic->hash_type = hash_type;
948
949                         /*
950                          * Use the supplied key if the key length is
951                          * acceptable and the rss_key is not NULL
952                          */
953                         if (rss_conf->rss_key &&
954                             rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
955                                 memcpy(vnic->rss_hash_key, rss_conf->rss_key,
956                                        rss_conf->rss_key_len);
957
958                         bnxt_hwrm_vnic_rss_cfg(bp, vnic);
959                 }
960         }
961         return 0;
962 }
963
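/*
 * bnxt_rss_hash_conf_get_op(): the inverse translation, mapping the
 * HWRM hash-type flags stored in the default VNIC back into rss_hf
 * bits; any leftover, unrecognized flag is reported as unsupported.
 */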
964 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
965                                      struct rte_eth_rss_conf *rss_conf)
966 {
967         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
968         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
969         int len;
970         uint32_t hash_types;
971
972         /* RSS configuration is the same for all VNICs */
973         if (vnic && vnic->rss_hash_key) {
974                 if (rss_conf->rss_key) {
975                         len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
976                               rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
977                         memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
978                 }
979
980                 hash_types = vnic->hash_type;
981                 rss_conf->rss_hf = 0;
982                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
983                         rss_conf->rss_hf |= ETH_RSS_IPV4;
984                         hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
985                 }
986                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
987                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
988                         hash_types &=
989                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
990                 }
991                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
992                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
993                         hash_types &=
994                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
995                 }
996                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
997                         rss_conf->rss_hf |= ETH_RSS_IPV6;
998                         hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
999                 }
1000                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
1001                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1002                         hash_types &=
1003                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
1004                 }
1005                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
1006                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1007                         hash_types &=
1008                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
1009                 }
1010                 if (hash_types) {
1011                         PMD_DRV_LOG(ERR,
1012                                 "Unknown RSS config from firmware (%08x), RSS disabled",
1013                                 vnic->hash_type);
1014                         return -ENOTSUP;
1015                 }
1016         } else {
1017                 rss_conf->rss_hf = 0;
1018         }
1019         return 0;
1020 }
1021
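/*
 * Flow control ops: the pause state cached in bp->link_info (refreshed
 * via bnxt_get_hwrm_link_config()) is mapped to RTE_FC_NONE, RX_PAUSE,
 * TX_PAUSE or FULL.
 */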
1022 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
1023                                struct rte_eth_fc_conf *fc_conf)
1024 {
1025         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1026         struct rte_eth_link link_info;
1027         int rc;
1028
1029         rc = bnxt_get_hwrm_link_config(bp, &link_info);
1030         if (rc)
1031                 return rc;
1032
1033         memset(fc_conf, 0, sizeof(*fc_conf));
1034         if (bp->link_info.auto_pause)
1035                 fc_conf->autoneg = 1;
1036         switch (bp->link_info.pause) {
1037         case 0:
1038                 fc_conf->mode = RTE_FC_NONE;
1039                 break;
1040         case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
1041                 fc_conf->mode = RTE_FC_TX_PAUSE;
1042                 break;
1043         case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
1044                 fc_conf->mode = RTE_FC_RX_PAUSE;
1045                 break;
1046         case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
1047                         HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
1048                 fc_conf->mode = RTE_FC_FULL;
1049                 break;
1050         }
1051         return 0;
1052 }
1053
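/*
 * bnxt_flow_ctrl_set_op(): only a single-function PF may change pause
 * settings. Each RTE_FC_* mode maps either to the autoneg pause
 * advertisement bits or to the forced pause bits, and the result is
 * applied with bnxt_set_hwrm_link_config().
 */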
1054 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
1055                                struct rte_eth_fc_conf *fc_conf)
1056 {
1057         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1058
1059         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
1060                 PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
1061                 return -ENOTSUP;
1062         }
1063
1064         switch (fc_conf->mode) {
1065         case RTE_FC_NONE:
1066                 bp->link_info.auto_pause = 0;
1067                 bp->link_info.force_pause = 0;
1068                 break;
1069         case RTE_FC_RX_PAUSE:
1070                 if (fc_conf->autoneg) {
1071                         bp->link_info.auto_pause =
1072                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1073                         bp->link_info.force_pause = 0;
1074                 } else {
1075                         bp->link_info.auto_pause = 0;
1076                         bp->link_info.force_pause =
1077                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1078                 }
1079                 break;
1080         case RTE_FC_TX_PAUSE:
1081                 if (fc_conf->autoneg) {
1082                         bp->link_info.auto_pause =
1083                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
1084                         bp->link_info.force_pause = 0;
1085                 } else {
1086                         bp->link_info.auto_pause = 0;
1087                         bp->link_info.force_pause =
1088                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
1089                 }
1090                 break;
1091         case RTE_FC_FULL:
1092                 if (fc_conf->autoneg) {
1093                         bp->link_info.auto_pause =
1094                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
1095                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
1096                         bp->link_info.force_pause = 0;
1097                 } else {
1098                         bp->link_info.auto_pause = 0;
1099                         bp->link_info.force_pause =
1100                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
1101                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
1102                 }
1103                 break;
1104         }
1105         return bnxt_set_hwrm_link_config(bp, true);
1106 }
1107
1108 /* Add UDP tunneling port */
1109 static int
1110 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
1111                          struct rte_eth_udp_tunnel *udp_tunnel)
1112 {
1113         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
1114         uint16_t tunnel_type = 0;
1115         int rc = 0;
1116
1117         switch (udp_tunnel->prot_type) {
1118         case RTE_TUNNEL_TYPE_VXLAN:
1119                 if (bp->vxlan_port_cnt) {
1120                         PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
1121                                 udp_tunnel->udp_port);
1122                         if (bp->vxlan_port != udp_tunnel->udp_port) {
1123                                 PMD_DRV_LOG(ERR, "Only one port allowed\n");
1124                                 return -ENOSPC;
1125                         }
1126                         bp->vxlan_port_cnt++;
1127                         return 0;
1128                 }
1129                 tunnel_type =
1130                         HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
1131                 bp->vxlan_port_cnt++;
1132                 break;
1133         case RTE_TUNNEL_TYPE_GENEVE:
1134                 if (bp->geneve_port_cnt) {
1135                         PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
1136                                 udp_tunnel->udp_port);
1137                         if (bp->geneve_port != udp_tunnel->udp_port) {
1138                                 PMD_DRV_LOG(ERR, "Only one port allowed\n");
1139                                 return -ENOSPC;
1140                         }
1141                         bp->geneve_port_cnt++;
1142                         return 0;
1143                 }
1144                 tunnel_type =
1145                         HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE;
1146                 bp->geneve_port_cnt++;
1147                 break;
1148         default:
1149                 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
1150                 return -ENOTSUP;
1151         }
1152         rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
1153                                              tunnel_type);
1154         return rc;
1155 }
1156
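/*
 * UDP tunnel port delete: the add path above reference-counts the
 * single VXLAN/Geneve port, so the HWRM free is only issued once the
 * count drops to zero and the requested port matches the programmed
 * one.
 */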
1157 static int
1158 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
1159                          struct rte_eth_udp_tunnel *udp_tunnel)
1160 {
1161         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
1162         uint16_t tunnel_type = 0;
1163         uint16_t port = 0;
1164         int rc = 0;
1165
1166         switch (udp_tunnel->prot_type) {
1167         case RTE_TUNNEL_TYPE_VXLAN:
1168                 if (!bp->vxlan_port_cnt) {
1169                         PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
1170                         return -EINVAL;
1171                 }
1172                 if (bp->vxlan_port != udp_tunnel->udp_port) {
1173                         PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
1174                                 udp_tunnel->udp_port, bp->vxlan_port);
1175                         return -EINVAL;
1176                 }
1177                 if (--bp->vxlan_port_cnt)
1178                         return 0;
1179
1180                 tunnel_type =
1181                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
1182                 port = bp->vxlan_fw_dst_port_id;
1183                 break;
1184         case RTE_TUNNEL_TYPE_GENEVE:
1185                 if (!bp->geneve_port_cnt) {
1186                         PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
1187                         return -EINVAL;
1188                 }
1189                 if (bp->geneve_port != udp_tunnel->udp_port) {
1190                         PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
1191                                 udp_tunnel->udp_port, bp->geneve_port);
1192                         return -EINVAL;
1193                 }
1194                 if (--bp->geneve_port_cnt)
1195                         return 0;
1196
1197                 tunnel_type =
1198                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE;
1199                 port = bp->geneve_fw_dst_port_id;
1200                 break;
1201         default:
1202                 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
1203                 return -ENOTSUP;
1204         }
1205
1206         rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
1207         if (!rc) {
1208                 if (tunnel_type ==
1209                     HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
1210                         bp->vxlan_port = 0;
1211                 if (tunnel_type ==
1212                     HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
1213                         bp->geneve_port = 0;
1214         }
1215         return rc;
1216 }
1217
1218 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
1219 {
1220         struct bnxt_filter_info *filter, *temp_filter, *new_filter;
1221         struct bnxt_vnic_info *vnic;
1222         unsigned int i;
1223         int rc = 0;
1224         uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;
1225
1226         /* Cycle through all VNICs */
1227         for (i = 0; i < bp->nr_vnics; i++) {
1228                 /*
1229                  * For each VNIC and each associated filter(s)
1230                  * if VLAN exists && VLAN matches vlan_id
1231                  *      remove the MAC+VLAN filter
1232                  *      add a new MAC only filter
1233                  * else
1234                  *      VLAN filter doesn't exist, just skip and continue
1235                  */
1236                 STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
1237                         filter = STAILQ_FIRST(&vnic->filter);
1238                         while (filter) {
1239                                 temp_filter = STAILQ_NEXT(filter, next);
1240
1241                                 if (filter->enables & chk &&
1242                                     filter->l2_ovlan == vlan_id) {
1243                                         /* Must delete the filter */
1244                                         STAILQ_REMOVE(&vnic->filter, filter,
1245                                                       bnxt_filter_info, next);
1246                                         bnxt_hwrm_clear_l2_filter(bp, filter);
1247                                         STAILQ_INSERT_TAIL(
1248                                                         &bp->free_filter_list,
1249                                                         filter, next);
1250
1251                                         /*
1252                                          * Need to check whether the MAC
1253                                          * filter already exists before
1254                                          * allocating a new one
1255                                          */
1256
1257                                         new_filter = bnxt_alloc_filter(bp);
1258                                         if (!new_filter) {
1259                                                 PMD_DRV_LOG(ERR,
1260                                                         "MAC/VLAN filter alloc failed\n");
1261                                                 rc = -ENOMEM;
1262                                                 goto exit;
1263                                         }
1264                                         STAILQ_INSERT_TAIL(&vnic->filter,
1265                                                            new_filter, next);
1266                                         /* Inherit MAC from previous filter */
1267                                         new_filter->mac_index =
1268                                                         filter->mac_index;
1269                                         memcpy(new_filter->l2_addr,
1270                                                filter->l2_addr, ETHER_ADDR_LEN);
1271                                         /* MAC only filter */
1272                                         rc = bnxt_hwrm_set_l2_filter(bp,
1273                                                         vnic->fw_vnic_id,
1274                                                         new_filter);
1275                                         if (rc)
1276                                                 goto exit;
1277                                         PMD_DRV_LOG(INFO,
1278                                                 "Del Vlan filter for %d\n",
1279                                                 vlan_id);
1280                                 }
1281                                 filter = temp_filter;
1282                         }
1283                 }
1284         }
1285 exit:
1286         return rc;
1287 }
1288
1289 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
1290 {
1291         struct bnxt_filter_info *filter, *temp_filter, *new_filter;
1292         struct bnxt_vnic_info *vnic;
1293         unsigned int i;
1294         int rc = 0;
1295         uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN |
1296                 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK;
1297         uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;
1298
1299         /* Cycle through all VNICs */
1300         for (i = 0; i < bp->nr_vnics; i++) {
1301                 /*
1302                  * For each VNIC and each associated filter(s)
1303                  * if VLAN exists:
1304                  *   if VLAN matches vlan_id
1305                  *      VLAN filter already exists, just skip and continue
1306                  *   else
1307                  *      add a new MAC+VLAN filter
1308                  * else
1309                  *   Remove the old MAC only filter
1310                  *    Add a new MAC+VLAN filter
1311                  */
1312                 STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
1313                         filter = STAILQ_FIRST(&vnic->filter);
1314                         while (filter) {
1315                                 temp_filter = STAILQ_NEXT(filter, next);
1316
1317                                 if (filter->enables & chk) {
1318                                         if (filter->l2_ovlan == vlan_id)
1319                                                 goto cont;
1320                                 } else {
1321                                         /* Must delete the MAC filter */
1322                                         STAILQ_REMOVE(&vnic->filter, filter,
1323                                                       bnxt_filter_info, next);
1324                                         bnxt_hwrm_clear_l2_filter(bp, filter);
1325                                         filter->l2_ovlan = 0;
1326                                         STAILQ_INSERT_TAIL(
1327                                                         &bp->free_filter_list,
1328                                                         filter, next);
1329                                 }
1330                                 new_filter = bnxt_alloc_filter(bp);
1331                                 if (!new_filter) {
1332                                         PMD_DRV_LOG(ERR,
1333                                                 "MAC/VLAN filter alloc failed\n");
1334                                         rc = -ENOMEM;
1335                                         goto exit;
1336                                 }
1337                                 STAILQ_INSERT_TAIL(&vnic->filter, new_filter,
1338                                                    next);
1339                                 /* Inherit MAC from the previous filter */
1340                                 new_filter->mac_index = filter->mac_index;
1341                                 memcpy(new_filter->l2_addr, filter->l2_addr,
1342                                        ETHER_ADDR_LEN);
1343                                 /* MAC + VLAN ID filter */
1344                                 new_filter->l2_ovlan = vlan_id;
1345                                 new_filter->l2_ovlan_mask = 0x0FFF;
1346                                 new_filter->enables |= en;
1347                                 rc = bnxt_hwrm_set_l2_filter(bp,
1348                                                              vnic->fw_vnic_id,
1349                                                              new_filter);
1350                                 if (rc)
1351                                         goto exit;
1352                                 PMD_DRV_LOG(INFO,
1353                                         "Added VLAN filter for %d\n", vlan_id);
1354 cont:
1355                                 filter = temp_filter;
1356                         }
1357                 }
1358         }
1359 exit:
1360         return rc;
1361 }
1362
1363 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
1364                                    uint16_t vlan_id, int on)
1365 {
1366         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
1367
1368         /* These operations apply to ALL existing MAC/VLAN filters */
1369         if (on)
1370                 return bnxt_add_vlan_filter(bp, vlan_id);
1371         else
1372                 return bnxt_del_vlan_filter(bp, vlan_id);
1373 }
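
/*
 * Usage sketch (application side, illustrative only -- not part of this
 * driver): an application reaches bnxt_vlan_filter_set_op() through the
 * generic ethdev call below.  DEV_RX_OFFLOAD_VLAN_FILTER must already be
 * enabled in the port's rxmode offloads; port 0 and VID 100 are assumed
 * values.
 *
 *     #include <rte_ethdev.h>
 *
 *     int ret = rte_eth_dev_vlan_filter(0, 100, 1);    // add VID 100
 *     if (ret == 0)
 *             ret = rte_eth_dev_vlan_filter(0, 100, 0); // remove it again
 */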
1374
1375 static int
1376 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
1377 {
1378         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1379         uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
1380         unsigned int i;
1381
1382         if (mask & ETH_VLAN_FILTER_MASK) {
1383                 if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
1384                         /* Remove any VLAN filters programmed */
1385                         for (i = 0; i < 4095; i++)
1386                                 bnxt_del_vlan_filter(bp, i);
1387                 }
1388                 PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
1389                         !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
1390         }
1391
1392         if (mask & ETH_VLAN_STRIP_MASK) {
1393                 /* Enable or disable VLAN stripping */
1394                 for (i = 0; i < bp->nr_vnics; i++) {
1395                         struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1396                         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1397                                 vnic->vlan_strip = true;
1398                         else
1399                                 vnic->vlan_strip = false;
1400                         bnxt_hwrm_vnic_cfg(bp, vnic);
1401                 }
1402                 PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
1403                         !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
1404         }
1405
1406         if (mask & ETH_VLAN_EXTEND_MASK)
1407                 PMD_DRV_LOG(ERR, "Extend VLAN Not supported\n");
1408
1409         return 0;
1410 }
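
/*
 * Usage sketch (illustrative, application side): toggling VLAN stripping at
 * runtime.  The ethdev layer works out which ETH_VLAN_*_MASK bits changed
 * and calls bnxt_vlan_offload_set_op() with that mask; port 0 is an assumed
 * value.
 *
 *     #include <rte_ethdev.h>
 *
 *     int offl = rte_eth_dev_get_vlan_offload(0);
 *     offl |= ETH_VLAN_STRIP_OFFLOAD;          // request HW VLAN strip
 *     int ret = rte_eth_dev_set_vlan_offload(0, offl);
 */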
1411
1412 static int
1413 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
1414 {
1415         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1416         /* Default Filter is tied to VNIC 0 */
1417         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
1418         struct bnxt_filter_info *filter;
1419         int rc;
1420
1421         if (BNXT_VF(bp))
1422                 return -EPERM;
1423
1424         memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr));
1425
1426         STAILQ_FOREACH(filter, &vnic->filter, next) {
1427                 /* Default Filter is at Index 0 */
1428                 if (filter->mac_index != 0)
1429                         continue;
1430                 rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1431                 if (rc)
1432                         return rc;
1433                 memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
1434                 memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
1435                 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
1436                 filter->enables |=
1437                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
1438                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
1439                 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
1440                 if (rc)
1441                         return rc;
1442                 filter->mac_index = 0;
1443                 PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
1444         }
1445
1446         return 0;
1447 }
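
/*
 * Usage sketch (illustrative): replacing the port's default MAC address from
 * an application.  As the op above enforces, this path is PF-only (a VF gets
 * -EPERM).  The address bytes and port 0 are assumed values.
 *
 *     #include <rte_ethdev.h>
 *
 *     struct ether_addr new_mac = {
 *             .addr_bytes = { 0x00, 0x0a, 0xf7, 0x12, 0x34, 0x56 } };
 *     int ret = rte_eth_dev_default_mac_addr_set(0, &new_mac);
 */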
1448
1449 static int
1450 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
1451                           struct ether_addr *mc_addr_set,
1452                           uint32_t nb_mc_addr)
1453 {
1454         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
1455         char *mc_addr_list = (char *)mc_addr_set;
1456         struct bnxt_vnic_info *vnic;
1457         uint32_t off = 0, i = 0;
1458
1459         vnic = &bp->vnic_info[0];
1460
1461         if (nb_mc_addr > BNXT_MAX_MC_ADDRS) {
1462                 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
1463                 goto allmulti;
1464         }
1465
1466         /* TODO Check for Duplicate mcast addresses */
1467         vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
1468         for (i = 0; i < nb_mc_addr; i++) {
1469                 memcpy(vnic->mc_list + off, mc_addr_list + off, ETHER_ADDR_LEN);
1470                 off += ETHER_ADDR_LEN;
1471         }
1472
1473         vnic->mc_addr_cnt = i;
1474
1475 allmulti:
1476         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
1477 }
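
/*
 * Usage sketch (illustrative): programming a multicast allow-list.  If the
 * list is longer than BNXT_MAX_MC_ADDRS, the op above falls back to
 * all-multicast mode.  The addresses and port 0 are assumed values.
 *
 *     #include <rte_ethdev.h>
 *
 *     struct ether_addr mc[2] = {
 *             { .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
 *             { .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
 *     };
 *     int ret = rte_eth_dev_set_mc_addr_list(0, mc, 2);
 *     // passing nb_mc_addr = 0 clears the list again
 */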
1478
1479 static int
1480 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1481 {
1482         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1483         uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
1484         uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
1485         uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
1486         int ret;
1487
1488         ret = snprintf(fw_version, fw_size, "%d.%d.%d",
1489                         fw_major, fw_minor, fw_updt);
1490
1491         ret += 1; /* add the size of '\0' */
1492         if (fw_size < (uint32_t)ret)
1493                 return ret;
1494         else
1495                 return 0;
1496 }
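
/*
 * Usage sketch (illustrative): the return convention of the op above is the
 * standard ethdev one -- 0 on success, or the number of bytes needed
 * (including the terminating NUL) when the caller's buffer is too small.
 * Port 0 is an assumed value.
 *
 *     #include <stdio.h>
 *     #include <rte_ethdev.h>
 *
 *     char ver[32];
 *     int ret = rte_eth_dev_fw_version_get(0, ver, sizeof(ver));
 *     if (ret == 0)
 *             printf("firmware %s\n", ver);
 *     else if (ret > 0)
 *             printf("need a %d byte buffer\n", ret);
 */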
1497
1498 static void
1499 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
1500         struct rte_eth_rxq_info *qinfo)
1501 {
1502         struct bnxt_rx_queue *rxq;
1503
1504         rxq = dev->data->rx_queues[queue_id];
1505
1506         qinfo->mp = rxq->mb_pool;
1507         qinfo->scattered_rx = dev->data->scattered_rx;
1508         qinfo->nb_desc = rxq->nb_rx_desc;
1509
1510         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1511         qinfo->conf.rx_drop_en = 0;
1512         qinfo->conf.rx_deferred_start = 0;
1513 }
1514
1515 static void
1516 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
1517         struct rte_eth_txq_info *qinfo)
1518 {
1519         struct bnxt_tx_queue *txq;
1520
1521         txq = dev->data->tx_queues[queue_id];
1522
1523         qinfo->nb_desc = txq->nb_tx_desc;
1524
1525         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1526         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1527         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1528
1529         qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1530         qinfo->conf.tx_rs_thresh = 0;
1531         qinfo->conf.txq_flags = txq->txq_flags;
1532         qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1533 }
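
/*
 * Usage sketch (illustrative): reading back the queue parameters exposed by
 * the two ops above.  Port 0 and queue 0 are assumed values.
 *
 *     #include <stdio.h>
 *     #include <rte_ethdev.h>
 *
 *     struct rte_eth_rxq_info rxq_info;
 *     struct rte_eth_txq_info txq_info;
 *
 *     if (rte_eth_rx_queue_info_get(0, 0, &rxq_info) == 0)
 *             printf("rxq0: %u descriptors\n", rxq_info.nb_desc);
 *     if (rte_eth_tx_queue_info_get(0, 0, &txq_info) == 0)
 *             printf("txq0: free_thresh %u\n", txq_info.conf.tx_free_thresh);
 */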
1534
1535 static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
1536 {
1537         struct bnxt *bp = eth_dev->data->dev_private;
1538         struct rte_eth_dev_info dev_info;
1539         uint32_t max_dev_mtu;
1540         uint32_t rc = 0;
1541         uint32_t i;
1542
1543         bnxt_dev_info_get_op(eth_dev, &dev_info);
1544         max_dev_mtu = dev_info.max_rx_pktlen -
1545                       ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2;
1546
1547         if (new_mtu < ETHER_MIN_MTU || new_mtu > max_dev_mtu) {
1548                 PMD_DRV_LOG(ERR, "MTU must be between %d and %d\n",
1549                         ETHER_MIN_MTU, max_dev_mtu);
1550                 return -EINVAL;
1551         }
1552
1553
1554         if (new_mtu > ETHER_MTU) {
1555                 bp->flags |= BNXT_FLAG_JUMBO;
1556                 bp->eth_dev->data->dev_conf.rxmode.offloads |=
1557                         DEV_RX_OFFLOAD_JUMBO_FRAME;
1558         } else {
1559                 bp->eth_dev->data->dev_conf.rxmode.offloads &=
1560                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1561                 bp->flags &= ~BNXT_FLAG_JUMBO;
1562         }
1563
1564         eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
1565                 new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
1566
1567         eth_dev->data->mtu = new_mtu;
1568         PMD_DRV_LOG(INFO, "New MTU is %d\n", eth_dev->data->mtu);
1569
1570         for (i = 0; i < bp->nr_vnics; i++) {
1571                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1572
1573                 vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1574                                         ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
1575                 rc = bnxt_hwrm_vnic_cfg(bp, vnic);
1576                 if (rc)
1577                         break;
1578
1579                 rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
1580                 if (rc)
1581                         return rc;
1582         }
1583
1584         return rc;
1585 }
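
/*
 * Usage sketch (illustrative): changing the MTU at runtime.  Values above
 * ETHER_MTU make the op above turn on DEV_RX_OFFLOAD_JUMBO_FRAME and
 * reconfigure every VNIC's MRU; 9000 and port 0 are assumed values.
 *
 *     #include <stdio.h>
 *     #include <rte_ethdev.h>
 *
 *     int ret = rte_eth_dev_set_mtu(0, 9000);
 *     if (ret != 0)
 *             printf("set_mtu failed: %d\n", ret);
 */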
1586
1587 static int
1588 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
1589 {
1590         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1591         uint16_t vlan = bp->vlan;
1592         int rc;
1593
1594         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
1595                 PMD_DRV_LOG(ERR,
1596                         "PVID cannot be modified for this function\n");
1597                 return -ENOTSUP;
1598         }
1599         bp->vlan = on ? pvid : 0;
1600
1601         rc = bnxt_hwrm_set_default_vlan(bp, 0, 0);
1602         if (rc)
1603                 bp->vlan = vlan;
1604         return rc;
1605 }
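
/*
 * Usage sketch (illustrative): setting a port VLAN ID.  As the op above
 * enforces, this only succeeds on a single-function PF; PVID 42 and port 0
 * are assumed values.
 *
 *     #include <rte_ethdev.h>
 *
 *     int ret = rte_eth_dev_set_vlan_pvid(0, 42, 1);   // insert PVID 42
 *     // rte_eth_dev_set_vlan_pvid(0, 42, 0) removes it again
 */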
1606
1607 static int
1608 bnxt_dev_led_on_op(struct rte_eth_dev *dev)
1609 {
1610         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1611
1612         return bnxt_hwrm_port_led_cfg(bp, true);
1613 }
1614
1615 static int
1616 bnxt_dev_led_off_op(struct rte_eth_dev *dev)
1617 {
1618         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1619
1620         return bnxt_hwrm_port_led_cfg(bp, false);
1621 }
1622
1623 static uint32_t
1624 bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1625 {
1626         uint32_t desc = 0, raw_cons = 0, cons;
1627         struct bnxt_cp_ring_info *cpr;
1628         struct bnxt_rx_queue *rxq;
1629         struct rx_pkt_cmpl *rxcmp;
1630         uint16_t cmp_type;
1631         uint8_t cmp = 1;
1632         bool valid;
1633
1634         rxq = dev->data->rx_queues[rx_queue_id];
1635         cpr = rxq->cp_ring;
1636         valid = cpr->valid;
1637
1638         while (raw_cons < rxq->nb_rx_desc) {
1639                 cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
1640                 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
1641
1642                 if (!CMPL_VALID(rxcmp, valid))
1643                         goto nothing_to_do;
1644                 valid = FLIP_VALID(cons, cpr->cp_ring_struct->ring_mask, valid);
1645                 cmp_type = CMP_TYPE(rxcmp);
1646                 if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
1647                         cmp = (rte_le_to_cpu_32(
1648                                         ((struct rx_tpa_end_cmpl *)
1649                                          (rxcmp))->agg_bufs_v1) &
1650                                RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
1651                                 RX_TPA_END_CMPL_AGG_BUFS_SFT;
1652                         desc++;
1653                 } else if (cmp_type == 0x11) { /* RX_PKT_CMPL_TYPE_RX_L2 */
1654                         desc++;
1655                         cmp = (rxcmp->agg_bufs_v1 &
1656                                    RX_PKT_CMPL_AGG_BUFS_MASK) >>
1657                                 RX_PKT_CMPL_AGG_BUFS_SFT;
1658                 } else {
1659                         cmp = 1;
1660                 }
1661 nothing_to_do:
1662                 raw_cons += cmp ? cmp : 2;
1663         }
1664
1665         return desc;
1666 }
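
/*
 * Usage sketch (illustrative): polling how many completed descriptors are
 * waiting on an Rx queue, e.g. to decide whether a service core should wake
 * up.  Port 0 and queue 0 are assumed values.
 *
 *     #include <stdio.h>
 *     #include <rte_ethdev.h>
 *
 *     int used = rte_eth_rx_queue_count(0, 0);
 *     if (used < 0)
 *             printf("queue count failed: %d\n", used);
 *     else
 *             printf("%d descriptors ready on rxq0\n", used);
 */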
1667
1668 static int
1669 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
1670 {
1671         struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
1672         struct bnxt_rx_ring_info *rxr;
1673         struct bnxt_cp_ring_info *cpr;
1674         struct bnxt_sw_rx_bd *rx_buf;
1675         struct rx_pkt_cmpl *rxcmp;
1676         uint32_t cons, cp_cons;
1677
1678         if (!rxq)
1679                 return -EINVAL;
1680
1681         cpr = rxq->cp_ring;
1682         rxr = rxq->rx_ring;
1683
1684         if (offset >= rxq->nb_rx_desc)
1685                 return -EINVAL;
1686
1687         cons = RING_CMP(cpr->cp_ring_struct, offset);
1688         cp_cons = cpr->cp_raw_cons;
1689         rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
1690
1691         if (cons > cp_cons) {
1692                 if (CMPL_VALID(rxcmp, cpr->valid))
1693                         return RTE_ETH_RX_DESC_DONE;
1694         } else {
1695                 if (CMPL_VALID(rxcmp, !cpr->valid))
1696                         return RTE_ETH_RX_DESC_DONE;
1697         }
1698         rx_buf = &rxr->rx_buf_ring[cons];
1699         if (rx_buf->mbuf == NULL)
1700                 return RTE_ETH_RX_DESC_UNAVAIL;
1701
1702
1703         return RTE_ETH_RX_DESC_AVAIL;
1704 }
1705
1706 static int
1707 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
1708 {
1709         struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
1710         struct bnxt_tx_ring_info *txr;
1711         struct bnxt_cp_ring_info *cpr;
1712         struct bnxt_sw_tx_bd *tx_buf;
1713         struct tx_pkt_cmpl *txcmp;
1714         uint32_t cons, cp_cons;
1715
1716         if (!txq)
1717                 return -EINVAL;
1718
1719         cpr = txq->cp_ring;
1720         txr = txq->tx_ring;
1721
1722         if (offset >= txq->nb_tx_desc)
1723                 return -EINVAL;
1724
1725         cons = RING_CMP(cpr->cp_ring_struct, offset);
1726         txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
1727         cp_cons = cpr->cp_raw_cons;
1728
1729         if (cons > cp_cons) {
1730                 if (CMPL_VALID(txcmp, cpr->valid))
1731                         return RTE_ETH_TX_DESC_UNAVAIL;
1732         } else {
1733                 if (CMPL_VALID(txcmp, !cpr->valid))
1734                         return RTE_ETH_TX_DESC_UNAVAIL;
1735         }
1736         tx_buf = &txr->tx_buf_ring[cons];
1737         if (tx_buf->mbuf == NULL)
1738                 return RTE_ETH_TX_DESC_DONE;
1739
1740         return RTE_ETH_TX_DESC_FULL;
1741 }
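
/*
 * Usage sketch (illustrative): probing individual descriptors through the
 * two status ops above.  The offset is relative to the position the PMD will
 * process next; port 0, queue 0 and offset 0 are assumed values.
 *
 *     #include <stdio.h>
 *     #include <rte_ethdev.h>
 *
 *     int rx = rte_eth_rx_descriptor_status(0, 0, 0);
 *     int tx = rte_eth_tx_descriptor_status(0, 0, 0);
 *
 *     if (rx == RTE_ETH_RX_DESC_DONE)
 *             printf("a packet is ready at that Rx slot\n");
 *     if (tx == RTE_ETH_TX_DESC_FULL)
 *             printf("that Tx slot is still owned by the hardware\n");
 */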
1742
1743 static struct bnxt_filter_info *
1744 bnxt_match_and_validate_ether_filter(struct bnxt *bp,
1745                                 struct rte_eth_ethertype_filter *efilter,
1746                                 struct bnxt_vnic_info *vnic0,
1747                                 struct bnxt_vnic_info *vnic,
1748                                 int *ret)
1749 {
1750         struct bnxt_filter_info *mfilter = NULL;
1751         int match = 0;
1752         *ret = 0;
1753
1754         if (efilter->ether_type == ETHER_TYPE_IPv4 ||
1755                 efilter->ether_type == ETHER_TYPE_IPv6) {
1756                 PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in"
1757                         " ethertype filter.", efilter->ether_type);
1758                 *ret = -EINVAL;
1759                 goto exit;
1760         }
1761         if (efilter->queue >= bp->rx_nr_rings) {
1762                 PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
1763                 *ret = -EINVAL;
1764                 goto exit;
1765         }
1766
1767         vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
1768         vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
1769         if (vnic == NULL) {
1770                 PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
1771                 *ret = -EINVAL;
1772                 goto exit;
1773         }
1774
1775         if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
1776                 STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
1777                         if ((!memcmp(efilter->mac_addr.addr_bytes,
1778                                      mfilter->l2_addr, ETHER_ADDR_LEN) &&
1779                              mfilter->flags ==
1780                              HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
1781                              mfilter->ethertype == efilter->ether_type)) {
1782                                 match = 1;
1783                                 break;
1784                         }
1785                 }
1786         } else {
1787                 STAILQ_FOREACH(mfilter, &vnic->filter, next)
1788                         if ((!memcmp(efilter->mac_addr.addr_bytes,
1789                                      mfilter->l2_addr, ETHER_ADDR_LEN) &&
1790                              mfilter->ethertype == efilter->ether_type &&
1791                              mfilter->flags ==
1792                              HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
1793                                 match = 1;
1794                                 break;
1795                         }
1796         }
1797
1798         if (match)
1799                 *ret = -EEXIST;
1800
1801 exit:
1802         return mfilter;
1803 }
1804
1805 static int
1806 bnxt_ethertype_filter(struct rte_eth_dev *dev,
1807                         enum rte_filter_op filter_op,
1808                         void *arg)
1809 {
1810         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1811         struct rte_eth_ethertype_filter *efilter =
1812                         (struct rte_eth_ethertype_filter *)arg;
1813         struct bnxt_filter_info *bfilter, *filter1;
1814         struct bnxt_vnic_info *vnic, *vnic0;
1815         int ret;
1816
1817         if (filter_op == RTE_ETH_FILTER_NOP)
1818                 return 0;
1819
1820         if (arg == NULL) {
1821                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
1822                             filter_op);
1823                 return -EINVAL;
1824         }
1825
1826         vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
1827         vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
1828
1829         switch (filter_op) {
1830         case RTE_ETH_FILTER_ADD:
1831                 bnxt_match_and_validate_ether_filter(bp, efilter,
1832                                                         vnic0, vnic, &ret);
1833                 if (ret < 0)
1834                         return ret;
1835
1836                 bfilter = bnxt_get_unused_filter(bp);
1837                 if (bfilter == NULL) {
1838                         PMD_DRV_LOG(ERR,
1839                                 "Not enough resources for a new filter.\n");
1840                         return -ENOMEM;
1841                 }
1842                 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
1843                 memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
1844                        ETHER_ADDR_LEN);
1845                 memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,
1846                        ETHER_ADDR_LEN);
1847                 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
1848                 bfilter->ethertype = efilter->ether_type;
1849                 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
1850
1851                 filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0);
1852                 if (filter1 == NULL) {
1853                         ret = -1;
1854                         goto cleanup;
1855                 }
1856                 bfilter->enables |=
1857                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1858                 bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
1859
1860                 bfilter->dst_id = vnic->fw_vnic_id;
1861
1862                 if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
1863                         bfilter->flags =
1864                                 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
1865                 }
1866
1867                 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
1868                 if (ret)
1869                         goto cleanup;
1870                 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
1871                 break;
1872         case RTE_ETH_FILTER_DELETE:
1873                 filter1 = bnxt_match_and_validate_ether_filter(bp, efilter,
1874                                                         vnic0, vnic, &ret);
1875                 if (ret == -EEXIST) {
1876                         ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1);
1877
1878                         STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info,
1879                                       next);
1880                         bnxt_free_filter(bp, filter1);
1881                 } else if (ret == 0) {
1882                         PMD_DRV_LOG(ERR, "No matching filter found\n");
1883                 }
1884                 break;
1885         default:
1886                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
1887                 ret = -EINVAL;
1888                 goto error;
1889         }
1890         return ret;
1891 cleanup:
1892         bnxt_free_filter(bp, bfilter);
1893 error:
1894         return ret;
1895 }
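
/*
 * Usage sketch (illustrative): installing an ethertype filter through the
 * legacy filter API handled above.  This example drops LLDP (0x88cc) frames;
 * the ethertype, queue and port are assumed values.
 *
 *     #include <rte_ethdev.h>
 *
 *     struct rte_eth_ethertype_filter ef = {
 *             .ether_type = 0x88cc,
 *             .flags = RTE_ETHTYPE_FLAGS_DROP,
 *             .queue = 0,
 *     };
 *     int ret = rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_ETHERTYPE,
 *                                       RTE_ETH_FILTER_ADD, &ef);
 */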
1896
1897 static inline int
1898 parse_ntuple_filter(struct bnxt *bp,
1899                     struct rte_eth_ntuple_filter *nfilter,
1900                     struct bnxt_filter_info *bfilter)
1901 {
1902         uint32_t en = 0;
1903
1904         if (nfilter->queue >= bp->rx_nr_rings) {
1905                 PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue);
1906                 return -EINVAL;
1907         }
1908
1909         switch (nfilter->dst_port_mask) {
1910         case UINT16_MAX:
1911                 bfilter->dst_port_mask = -1;
1912                 bfilter->dst_port = nfilter->dst_port;
1913                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT |
1914                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
1915                 break;
1916         default:
1917                 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
1918                 return -EINVAL;
1919         }
1920
1921         bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
1922         en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
1923
1924         switch (nfilter->proto_mask) {
1925         case UINT8_MAX:
1926                 if (nfilter->proto == 17) /* IPPROTO_UDP */
1927                         bfilter->ip_protocol = 17;
1928                 else if (nfilter->proto == 6) /* IPPROTO_TCP */
1929                         bfilter->ip_protocol = 6;
1930                 else
1931                         return -EINVAL;
1932                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
1933                 break;
1934         default:
1935                 PMD_DRV_LOG(ERR, "invalid protocol mask.");
1936                 return -EINVAL;
1937         }
1938
1939         switch (nfilter->dst_ip_mask) {
1940         case UINT32_MAX:
1941                 bfilter->dst_ipaddr_mask[0] = -1;
1942                 bfilter->dst_ipaddr[0] = nfilter->dst_ip;
1943                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR |
1944                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
1945                 break;
1946         default:
1947                 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
1948                 return -EINVAL;
1949         }
1950
1951         switch (nfilter->src_ip_mask) {
1952         case UINT32_MAX:
1953                 bfilter->src_ipaddr_mask[0] = -1;
1954                 bfilter->src_ipaddr[0] = nfilter->src_ip;
1955                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
1956                         NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
1957                 break;
1958         default:
1959                 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
1960                 return -EINVAL;
1961         }
1962
1963         switch (nfilter->src_port_mask) {
1964         case UINT16_MAX:
1965                 bfilter->src_port_mask = -1;
1966                 bfilter->src_port = nfilter->src_port;
1967                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
1968                         NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
1969                 break;
1970         default:
1971                 PMD_DRV_LOG(ERR, "invalid src_port mask.");
1972                 return -EINVAL;
1973         }
1974
1975         /* TODO: priority */
1976         /* nfilter->priority = (uint8_t)filter->priority; */
1977
1978         bfilter->enables = en;
1979         return 0;
1980 }
1981
1982 static struct bnxt_filter_info*
1983 bnxt_match_ntuple_filter(struct bnxt *bp,
1984                          struct bnxt_filter_info *bfilter,
1985                          struct bnxt_vnic_info **mvnic)
1986 {
1987         struct bnxt_filter_info *mfilter = NULL;
1988         int i;
1989
1990         for (i = bp->nr_vnics - 1; i >= 0; i--) {
1991                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1992                 STAILQ_FOREACH(mfilter, &vnic->filter, next) {
1993                         if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
1994                             bfilter->src_ipaddr_mask[0] ==
1995                             mfilter->src_ipaddr_mask[0] &&
1996                             bfilter->src_port == mfilter->src_port &&
1997                             bfilter->src_port_mask == mfilter->src_port_mask &&
1998                             bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
1999                             bfilter->dst_ipaddr_mask[0] ==
2000                             mfilter->dst_ipaddr_mask[0] &&
2001                             bfilter->dst_port == mfilter->dst_port &&
2002                             bfilter->dst_port_mask == mfilter->dst_port_mask &&
2003                             bfilter->flags == mfilter->flags &&
2004                             bfilter->enables == mfilter->enables) {
2005                                 if (mvnic)
2006                                         *mvnic = vnic;
2007                                 return mfilter;
2008                         }
2009                 }
2010         }
2011         return NULL;
2012 }
2013
2014 static int
2015 bnxt_cfg_ntuple_filter(struct bnxt *bp,
2016                        struct rte_eth_ntuple_filter *nfilter,
2017                        enum rte_filter_op filter_op)
2018 {
2019         struct bnxt_filter_info *bfilter, *mfilter, *filter1;
2020         struct bnxt_vnic_info *vnic, *vnic0, *mvnic;
2021         int ret;
2022
2023         if (nfilter->flags != RTE_5TUPLE_FLAGS) {
2024                 PMD_DRV_LOG(ERR, "only 5tuple is supported.");
2025                 return -EINVAL;
2026         }
2027
2028         if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
2029                 PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n");
2030                 return -EINVAL;
2031         }
2032
2033         bfilter = bnxt_get_unused_filter(bp);
2034         if (bfilter == NULL) {
2035                 PMD_DRV_LOG(ERR,
2036                         "Not enough resources for a new filter.\n");
2037                 return -ENOMEM;
2038         }
2039         ret = parse_ntuple_filter(bp, nfilter, bfilter);
2040         if (ret < 0)
2041                 goto free_filter;
2042
2043         vnic = STAILQ_FIRST(&bp->ff_pool[nfilter->queue]);
2044         vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
2045         filter1 = STAILQ_FIRST(&vnic0->filter);
2046         if (filter1 == NULL) {
2047                 ret = -1;
2048                 goto free_filter;
2049         }
2050
2051         bfilter->dst_id = vnic->fw_vnic_id;
2052         bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
2053         bfilter->enables |=
2054                 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
2055                 bfilter->ethertype = 0x800; /* IPv4 */
2056         bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2057
2058         mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic);
2059
2060         if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
2061             bfilter->dst_id == mfilter->dst_id) {
2062                 PMD_DRV_LOG(ERR, "filter exists.\n");
2063                 ret = -EEXIST;
2064                 goto free_filter;
2065         } else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
2066                    bfilter->dst_id != mfilter->dst_id) {
2067                 mfilter->dst_id = vnic->fw_vnic_id;
2068                 ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter);
2069                 STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next);
2070                 STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next);
2071                 PMD_DRV_LOG(ERR, "filter with matching pattern exists.\n");
2072                 PMD_DRV_LOG(ERR, " Updated it to the new destination queue\n");
2073                 goto free_filter;
2074         }
2075         if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
2076                 PMD_DRV_LOG(ERR, "filter doesn't exist.");
2077                 ret = -ENOENT;
2078                 goto free_filter;
2079         }
2080
2081         if (filter_op == RTE_ETH_FILTER_ADD) {
2082                 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
2083                 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
2084                 if (ret)
2085                         goto free_filter;
2086                 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
2087         } else {
2088                 if (mfilter == NULL) {
2089                         /* Should not happen; kept to satisfy Coverity. */
2090                         ret = -ENOENT;
2091                         goto free_filter;
2092                 }
2093                 ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter);
2094
2095                 STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next);
2096                 bnxt_free_filter(bp, mfilter);
2097                 mfilter->fw_l2_filter_id = -1;
2098                 bnxt_free_filter(bp, bfilter);
2099                 bfilter->fw_l2_filter_id = -1;
2100         }
2101
2102         return 0;
2103 free_filter:
2104         bfilter->fw_l2_filter_id = -1;
2105         bnxt_free_filter(bp, bfilter);
2106         return ret;
2107 }
2108
2109 static int
2110 bnxt_ntuple_filter(struct rte_eth_dev *dev,
2111                         enum rte_filter_op filter_op,
2112                         void *arg)
2113 {
2114         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2115         int ret;
2116
2117         if (filter_op == RTE_ETH_FILTER_NOP)
2118                 return 0;
2119
2120         if (arg == NULL) {
2121                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
2122                             filter_op);
2123                 return -EINVAL;
2124         }
2125
2126         switch (filter_op) {
2127         case RTE_ETH_FILTER_ADD:
2128                 ret = bnxt_cfg_ntuple_filter(bp,
2129                         (struct rte_eth_ntuple_filter *)arg,
2130                         filter_op);
2131                 break;
2132         case RTE_ETH_FILTER_DELETE:
2133                 ret = bnxt_cfg_ntuple_filter(bp,
2134                         (struct rte_eth_ntuple_filter *)arg,
2135                         filter_op);
2136                 break;
2137         default:
2138                 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
2139                 ret = -EINVAL;
2140                 break;
2141         }
2142         return ret;
2143 }
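
/*
 * Usage sketch (illustrative): a 5-tuple filter steering TCP 10.0.0.1:80 ->
 * 10.0.0.2:8080 traffic to Rx queue 3.  parse_ntuple_filter() above only
 * accepts exact (all-ones) masks, which is what this sets.  Addresses,
 * ports, queue and port id are all assumed values.
 *
 *     #include <rte_ethdev.h>
 *     #include <rte_byteorder.h>
 *     #include <rte_ip.h>
 *
 *     struct rte_eth_ntuple_filter nf = {
 *             .flags = RTE_5TUPLE_FLAGS,
 *             .src_ip = rte_cpu_to_be_32(IPv4(10, 0, 0, 1)),
 *             .src_ip_mask = UINT32_MAX,
 *             .dst_ip = rte_cpu_to_be_32(IPv4(10, 0, 0, 2)),
 *             .dst_ip_mask = UINT32_MAX,
 *             .src_port = rte_cpu_to_be_16(80),
 *             .src_port_mask = UINT16_MAX,
 *             .dst_port = rte_cpu_to_be_16(8080),
 *             .dst_port_mask = UINT16_MAX,
 *             .proto = 6,                        // IPPROTO_TCP
 *             .proto_mask = UINT8_MAX,
 *             .queue = 3,
 *     };
 *     int ret = rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_NTUPLE,
 *                                       RTE_ETH_FILTER_ADD, &nf);
 */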
2144
2145 static int
2146 bnxt_parse_fdir_filter(struct bnxt *bp,
2147                        struct rte_eth_fdir_filter *fdir,
2148                        struct bnxt_filter_info *filter)
2149 {
2150         enum rte_fdir_mode fdir_mode =
2151                 bp->eth_dev->data->dev_conf.fdir_conf.mode;
2152         struct bnxt_vnic_info *vnic0, *vnic;
2153         struct bnxt_filter_info *filter1;
2154         uint32_t en = 0;
2155         int i;
2156
2157         if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
2158                 return -EINVAL;
2159
2160         filter->l2_ovlan = fdir->input.flow_ext.vlan_tci;
2161         en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
2162
2163         switch (fdir->input.flow_type) {
2164         case RTE_ETH_FLOW_IPV4:
2165         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
2166                 /* FALLTHROUGH */
2167                 filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip;
2168                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2169                 filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip;
2170                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2171                 filter->ip_protocol = fdir->input.flow.ip4_flow.proto;
2172                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2173                 filter->ip_addr_type =
2174                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2175                 filter->src_ipaddr_mask[0] = 0xffffffff;
2176                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2177                 filter->dst_ipaddr_mask[0] = 0xffffffff;
2178                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2179                 filter->ethertype = 0x800;
2180                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2181                 break;
2182         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
2183                 filter->src_port = fdir->input.flow.tcp4_flow.src_port;
2184                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2185                 filter->dst_port = fdir->input.flow.tcp4_flow.dst_port;
2186                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2187                 filter->dst_port_mask = 0xffff;
2188                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2189                 filter->src_port_mask = 0xffff;
2190                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2191                 filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip;
2192                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2193                 filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip;
2194                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2195                 filter->ip_protocol = 6;
2196                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2197                 filter->ip_addr_type =
2198                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2199                 filter->src_ipaddr_mask[0] = 0xffffffff;
2200                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2201                 filter->dst_ipaddr_mask[0] = 0xffffffff;
2202                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2203                 filter->ethertype = 0x800;
2204                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2205                 break;
2206         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
2207                 filter->src_port = fdir->input.flow.udp4_flow.src_port;
2208                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2209                 filter->dst_port = fdir->input.flow.udp4_flow.dst_port;
2210                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2211                 filter->dst_port_mask = 0xffff;
2212                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2213                 filter->src_port_mask = 0xffff;
2214                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2215                 filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip;
2216                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2217                 filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip;
2218                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2219                 filter->ip_protocol = 17;
2220                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2221                 filter->ip_addr_type =
2222                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
2223                 filter->src_ipaddr_mask[0] = 0xffffffff;
2224                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2225                 filter->dst_ipaddr_mask[0] = 0xffffffff;
2226                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2227                 filter->ethertype = 0x800;
2228                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2229                 break;
2230         case RTE_ETH_FLOW_IPV6:
2231         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
2232                 /* FALLTHROUGH */
2233                 filter->ip_addr_type =
2234                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
2235                 filter->ip_protocol = fdir->input.flow.ipv6_flow.proto;
2236                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2237                 rte_memcpy(filter->src_ipaddr,
2238                            fdir->input.flow.ipv6_flow.src_ip, 16);
2239                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2240                 rte_memcpy(filter->dst_ipaddr,
2241                            fdir->input.flow.ipv6_flow.dst_ip, 16);
2242                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2243                 memset(filter->dst_ipaddr_mask, 0xff, 16);
2244                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2245                 memset(filter->src_ipaddr_mask, 0xff, 16);
2246                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2247                 filter->ethertype = 0x86dd;
2248                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2249                 break;
2250         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
2251                 filter->src_port = fdir->input.flow.tcp6_flow.src_port;
2252                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2253                 filter->dst_port = fdir->input.flow.tcp6_flow.dst_port;
2254                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2255                 filter->dst_port_mask = 0xffff;
2256                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2257                 filter->src_port_mask = 0xffff;
2258                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2259                 filter->ip_addr_type =
2260                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
2261                 filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto;
2262                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2263                 rte_memcpy(filter->src_ipaddr,
2264                            fdir->input.flow.tcp6_flow.ip.src_ip, 16);
2265                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2266                 rte_memcpy(filter->dst_ipaddr,
2267                            fdir->input.flow.tcp6_flow.ip.dst_ip, 16);
2268                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2269                 memset(filter->dst_ipaddr_mask, 0xff, 16);
2270                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2271                 memset(filter->src_ipaddr_mask, 0xff, 16);
2272                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2273                 filter->ethertype = 0x86dd;
2274                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2275                 break;
2276         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
2277                 filter->src_port = fdir->input.flow.udp6_flow.src_port;
2278                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
2279                 filter->dst_port = fdir->input.flow.udp6_flow.dst_port;
2280                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
2281                 filter->dst_port_mask = 0xffff;
2282                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
2283                 filter->src_port_mask = 0xffff;
2284                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
2285                 filter->ip_addr_type =
2286                         NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
2287                 filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto;
2288                 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
2289                 rte_memcpy(filter->src_ipaddr,
2290                            fdir->input.flow.udp6_flow.ip.src_ip, 16);
2291                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
2292                 rte_memcpy(filter->dst_ipaddr,
2293                            fdir->input.flow.udp6_flow.ip.dst_ip, 16);
2294                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
2295                 memset(filter->dst_ipaddr_mask, 0xff, 16);
2296                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
2297                 memset(filter->src_ipaddr_mask, 0xff, 16);
2298                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
2299                 filter->ethertype = 0x86dd;
2300                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2301                 break;
2302         case RTE_ETH_FLOW_L2_PAYLOAD:
2303                 filter->ethertype = fdir->input.flow.l2_flow.ether_type;
2304                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
2305                 break;
2306         case RTE_ETH_FLOW_VXLAN:
2307                 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
2308                         return -EINVAL;
2309                 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
2310                 filter->tunnel_type =
2311                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
2312                 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
2313                 break;
2314         case RTE_ETH_FLOW_NVGRE:
2315                 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
2316                         return -EINVAL;
2317                 filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
2318                 filter->tunnel_type =
2319                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
2320                 en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
2321                 break;
2322         case RTE_ETH_FLOW_UNKNOWN:
2323         case RTE_ETH_FLOW_RAW:
2324         case RTE_ETH_FLOW_FRAG_IPV4:
2325         case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
2326         case RTE_ETH_FLOW_FRAG_IPV6:
2327         case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
2328         case RTE_ETH_FLOW_IPV6_EX:
2329         case RTE_ETH_FLOW_IPV6_TCP_EX:
2330         case RTE_ETH_FLOW_IPV6_UDP_EX:
2331         case RTE_ETH_FLOW_GENEVE:
2332                 /* FALLTHROUGH */
2333         default:
2334                 return -EINVAL;
2335         }
2336
2337         vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
2338         vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
2339         if (vnic == NULL) {
2340                 PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue);
2341                 return -EINVAL;
2342         }
2343
2344
2345         if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
2346                 rte_memcpy(filter->dst_macaddr,
2347                         fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6);
2348                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
2349         }
2350
2351         if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) {
2352                 filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
2353                 filter1 = STAILQ_FIRST(&vnic0->filter);
2354                 /* filter1 = bnxt_get_l2_filter(bp, filter, vnic0); */
2355         } else {
2356                 filter->dst_id = vnic->fw_vnic_id;
                for (i = 0; i < ETHER_ADDR_LEN; i++)
                        if (filter->dst_macaddr[i] != 0x00)
                                break;
                /* All-zero dst MAC: no MAC match was requested, use the
                 * default L2 filter; otherwise build one for this MAC.
                 */
                if (i == ETHER_ADDR_LEN)
                        filter1 = STAILQ_FIRST(&vnic0->filter);
                else
                        filter1 = bnxt_get_l2_filter(bp, filter, vnic);
2362         }
2363
2364         if (filter1 == NULL)
2365                 return -EINVAL;
2366
2367         en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
2368         filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
2369
2370         filter->enables = en;
2371
2372         return 0;
2373 }
2374
2375 static struct bnxt_filter_info *
2376 bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf,
2377                 struct bnxt_vnic_info **mvnic)
2378 {
2379         struct bnxt_filter_info *mf = NULL;
2380         int i;
2381
2382         for (i = bp->nr_vnics - 1; i >= 0; i--) {
2383                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2384
2385                 STAILQ_FOREACH(mf, &vnic->filter, next) {
2386                         if (mf->filter_type == nf->filter_type &&
2387                             mf->flags == nf->flags &&
2388                             mf->src_port == nf->src_port &&
2389                             mf->src_port_mask == nf->src_port_mask &&
2390                             mf->dst_port == nf->dst_port &&
2391                             mf->dst_port_mask == nf->dst_port_mask &&
2392                             mf->ip_protocol == nf->ip_protocol &&
2393                             mf->ip_addr_type == nf->ip_addr_type &&
2394                             mf->ethertype == nf->ethertype &&
2395                             mf->vni == nf->vni &&
2396                             mf->tunnel_type == nf->tunnel_type &&
2397                             mf->l2_ovlan == nf->l2_ovlan &&
2398                             mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
2399                             mf->l2_ivlan == nf->l2_ivlan &&
2400                             mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
2401                             !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
2402                             !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
2403                                     ETHER_ADDR_LEN) &&
2404                             !memcmp(mf->src_macaddr, nf->src_macaddr,
2405                                     ETHER_ADDR_LEN) &&
2406                             !memcmp(mf->dst_macaddr, nf->dst_macaddr,
2407                                     ETHER_ADDR_LEN) &&
2408                             !memcmp(mf->src_ipaddr, nf->src_ipaddr,
2409                                     sizeof(nf->src_ipaddr)) &&
2410                             !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
2411                                     sizeof(nf->src_ipaddr_mask)) &&
2412                             !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
2413                                     sizeof(nf->dst_ipaddr)) &&
2414                             !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
2415                                     sizeof(nf->dst_ipaddr_mask))) {
2416                                 if (mvnic)
2417                                         *mvnic = vnic;
2418                                 return mf;
2419                         }
2420                 }
2421         }
2422         return NULL;
2423 }
2424
2425 static int
2426 bnxt_fdir_filter(struct rte_eth_dev *dev,
2427                  enum rte_filter_op filter_op,
2428                  void *arg)
2429 {
2430         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2431         struct rte_eth_fdir_filter *fdir  = (struct rte_eth_fdir_filter *)arg;
2432         struct bnxt_filter_info *filter, *match;
2433         struct bnxt_vnic_info *vnic, *mvnic;
2434         int ret = 0, i;
2435
2436         if (filter_op == RTE_ETH_FILTER_NOP)
2437                 return 0;
2438
2439         if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
2440                 return -EINVAL;
2441
2442         switch (filter_op) {
2443         case RTE_ETH_FILTER_ADD:
2444         case RTE_ETH_FILTER_DELETE:
2445                 filter = bnxt_get_unused_filter(bp);
2446                 if (filter == NULL) {
2447                         PMD_DRV_LOG(ERR,
2448                                 "Not enough resources for a new flow.\n");
2449                         return -ENOMEM;
2450                 }
2451
2452                 ret = bnxt_parse_fdir_filter(bp, fdir, filter);
2453                 if (ret != 0)
2454                         goto free_filter;
2455                 filter->filter_type = HWRM_CFA_NTUPLE_FILTER;
2456
2457                 if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
2458                         vnic = STAILQ_FIRST(&bp->ff_pool[0]);
2459                 else
2460                         vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
2461
2462                 match = bnxt_match_fdir(bp, filter, &mvnic);
2463                 if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
2464                         if (match->dst_id == vnic->fw_vnic_id) {
2465                                 PMD_DRV_LOG(ERR, "Flow already exists.\n");
2466                                 ret = -EEXIST;
2467                                 goto free_filter;
2468                         } else {
2469                                 match->dst_id = vnic->fw_vnic_id;
2470                                 ret = bnxt_hwrm_set_ntuple_filter(bp,
2471                                                                   match->dst_id,
2472                                                                   match);
2473                                 STAILQ_REMOVE(&mvnic->filter, match,
2474                                               bnxt_filter_info, next);
2475                                 STAILQ_INSERT_TAIL(&vnic->filter, match, next);
2476                                 PMD_DRV_LOG(ERR,
2477                                         "Filter with matching pattern exist\n");
2478                                 PMD_DRV_LOG(ERR,
2479                                         "Updated it to new destination q\n");
2480                                 goto free_filter;
2481                         }
2482                 }
2483                 if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
2484                         PMD_DRV_LOG(ERR, "Flow does not exist.\n");
2485                         ret = -ENOENT;
2486                         goto free_filter;
2487                 }
2488
2489                 if (filter_op == RTE_ETH_FILTER_ADD) {
2490                         ret = bnxt_hwrm_set_ntuple_filter(bp,
2491                                                           filter->dst_id,
2492                                                           filter);
2493                         if (ret)
2494                                 goto free_filter;
2495                         STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
2496                 } else {
2497                         ret = bnxt_hwrm_clear_ntuple_filter(bp, match);
2498                         STAILQ_REMOVE(&vnic->filter, match,
2499                                       bnxt_filter_info, next);
2500                         bnxt_free_filter(bp, match);
2501                         filter->fw_l2_filter_id = -1;
2502                         bnxt_free_filter(bp, filter);
2503                 }
2504                 break;
2505         case RTE_ETH_FILTER_FLUSH:
2506                 for (i = bp->nr_vnics - 1; i >= 0; i--) {
2507                         struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2508
2509                         STAILQ_FOREACH(filter, &vnic->filter, next) {
2510                                 if (filter->filter_type ==
2511                                     HWRM_CFA_NTUPLE_FILTER) {
2512                                         ret =
2513                                         bnxt_hwrm_clear_ntuple_filter(bp,
2514                                                                       filter);
2515                                         STAILQ_REMOVE(&vnic->filter, filter,
2516                                                       bnxt_filter_info, next);
2517                                 }
2518                         }
2519                 }
2520                 return ret;
2521         case RTE_ETH_FILTER_UPDATE:
2522         case RTE_ETH_FILTER_STATS:
2523         case RTE_ETH_FILTER_INFO:
2524                 PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op);
2525                 break;
2526         default:
2527                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
2528                 ret = -EINVAL;
2529                 break;
2530         }
2531         return ret;
2532
2533 free_filter:
2534         filter->fw_l2_filter_id = -1;
2535         bnxt_free_filter(bp, filter);
2536         return ret;
2537 }
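
/*
 * Usage sketch (illustrative): an equivalent flow-director request for the
 * ADD/DELETE path above, matching an IPv4/TCP flow and directing it to Rx
 * queue 1, with fdir_conf.mode set to RTE_FDIR_MODE_PERFECT in the port
 * configuration (assumed).  All field values are assumptions.
 *
 *     #include <rte_ethdev.h>
 *     #include <rte_byteorder.h>
 *     #include <rte_ip.h>
 *
 *     struct rte_eth_fdir_filter f = { 0 };
 *
 *     f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
 *     f.input.flow.tcp4_flow.ip.src_ip = rte_cpu_to_be_32(IPv4(10, 0, 0, 1));
 *     f.input.flow.tcp4_flow.ip.dst_ip = rte_cpu_to_be_32(IPv4(10, 0, 0, 2));
 *     f.input.flow.tcp4_flow.src_port = rte_cpu_to_be_16(80);
 *     f.input.flow.tcp4_flow.dst_port = rte_cpu_to_be_16(8080);
 *     f.action.rx_queue = 1;
 *     f.action.behavior = RTE_ETH_FDIR_ACCEPT;
 *
 *     int ret = rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_FDIR,
 *                                       RTE_ETH_FILTER_ADD, &f);
 */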
2538
2539 static int
2540 bnxt_filter_ctrl_op(struct rte_eth_dev *dev,
2541                     enum rte_filter_type filter_type,
2542                     enum rte_filter_op filter_op, void *arg)
2543 {
2544         int ret = 0;
2545
2546         switch (filter_type) {
2547         case RTE_ETH_FILTER_TUNNEL:
2548                 PMD_DRV_LOG(ERR,
2549                         "filter type: %d: To be implemented\n", filter_type);
2550                 break;
2551         case RTE_ETH_FILTER_FDIR:
2552                 ret = bnxt_fdir_filter(dev, filter_op, arg);
2553                 break;
2554         case RTE_ETH_FILTER_NTUPLE:
2555                 ret = bnxt_ntuple_filter(dev, filter_op, arg);
2556                 break;
2557         case RTE_ETH_FILTER_ETHERTYPE:
2558                 ret = bnxt_ethertype_filter(dev, filter_op, arg);
2559                 break;
2560         case RTE_ETH_FILTER_GENERIC:
2561                 if (filter_op != RTE_ETH_FILTER_GET)
2562                         return -EINVAL;
2563                 *(const void **)arg = &bnxt_flow_ops;
2564                 break;
2565         default:
2566                 PMD_DRV_LOG(ERR,
2567                         "Filter type (%d) not supported", filter_type);
2568                 ret = -EINVAL;
2569                 break;
2570         }
2571         return ret;
2572 }
2573
2574 static const uint32_t *
2575 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
2576 {
2577         static const uint32_t ptypes[] = {
2578                 RTE_PTYPE_L2_ETHER_VLAN,
2579                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2580                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2581                 RTE_PTYPE_L4_ICMP,
2582                 RTE_PTYPE_L4_TCP,
2583                 RTE_PTYPE_L4_UDP,
2584                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2585                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2586                 RTE_PTYPE_INNER_L4_ICMP,
2587                 RTE_PTYPE_INNER_L4_TCP,
2588                 RTE_PTYPE_INNER_L4_UDP,
2589                 RTE_PTYPE_UNKNOWN
2590         };
2591
2592         if (dev->rx_pkt_burst == bnxt_recv_pkts)
2593                 return ptypes;
2594         return NULL;
2595 }
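
/*
 * Usage sketch (illustrative): querying which packet types the receive path
 * above can classify.  Port 0 is assumed; the first call sizes the array,
 * the second fills it.
 *
 *     #include <rte_ethdev.h>
 *     #include <rte_mbuf_ptype.h>
 *
 *     int n = rte_eth_dev_get_supported_ptypes(0, RTE_PTYPE_ALL_MASK,
 *                                              NULL, 0);
 *     if (n > 0) {
 *             uint32_t types[n];
 *             n = rte_eth_dev_get_supported_ptypes(0, RTE_PTYPE_ALL_MASK,
 *                                                  types, n);
 *     }
 */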
2596
2597 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
2598                          int reg_win)
2599 {
2600         uint32_t reg_base = *reg_arr & 0xfffff000;
2601         uint32_t win_off;
2602         int i;
2603
2604         for (i = 0; i < count; i++) {
2605                 if ((reg_arr[i] & 0xfffff000) != reg_base)
2606                         return -ERANGE;
2607         }
2608         win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
2609         rte_write32(rte_cpu_to_le_32(reg_base), (uint8_t *)bp->bar0 + win_off);
2610         return 0;
2611 }
2612
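/* Map the PTP Rx registers through GRC window 5 (BAR offset 0x5000) and the
 * Tx registers through window 6 (BAR offset 0x6000), recording the resulting
 * per-register BAR offsets.
 */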
2613 static int bnxt_map_ptp_regs(struct bnxt *bp)
2614 {
2615         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2616         uint32_t *reg_arr;
2617         int rc, i;
2618
2619         reg_arr = ptp->rx_regs;
2620         rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5);
2621         if (rc)
2622                 return rc;
2623
2624         reg_arr = ptp->tx_regs;
2625         rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6);
2626         if (rc)
2627                 return rc;
2628
2629         for (i = 0; i < BNXT_PTP_RX_REGS; i++)
2630                 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff);
2631
2632         for (i = 0; i < BNXT_PTP_TX_REGS; i++)
2633                 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff);
2634
2635         return 0;
2636 }
2637
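/* Clear GRC windows 5 and 6 that were used for the PTP registers. */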
2638 static void bnxt_unmap_ptp_regs(struct bnxt *bp)
2639 {
2640         rte_write32(0, (uint8_t *)bp->bar0 +
2641                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16);
2642         rte_write32(0, (uint8_t *)bp->bar0 +
2643                     BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20);
2644 }
2645
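/* Read the 64-bit hardware time counter (low word first); this is the cycle
 * counter feeding the software timecounters below.
 */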
2646 static uint64_t bnxt_cc_read(struct bnxt *bp)
2647 {
2648         uint64_t ns;
2649
2650         ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2651                               BNXT_GRCPF_REG_SYNC_TIME));
2652         ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2653                                           BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32;
2654         return ns;
2655 }
2656
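/* Pop the latest Tx timestamp from the PTP Tx FIFO, or -EAGAIN if empty. */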
2657 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts)
2658 {
2659         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2660         uint32_t fifo;
2661
2662         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2663                                 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
2664         if (fifo & BNXT_PTP_TX_FIFO_EMPTY)
2665                 return -EAGAIN;
2666
2667         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2668                                 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
2669         *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2670                                 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L]));
2671         *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2672                                 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32;
2673
2674         return 0;
2675 }
2676
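/* Fetch a pending Rx PTP timestamp: advance the Rx FIFO for this port, then
 * read the timestamp registers.  -EAGAIN means nothing was pending, -EBUSY
 * means the FIFO still reports a pending entry after the advance.
 */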
2677 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts)
2678 {
2679         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2680         struct bnxt_pf_info *pf = &bp->pf;
2681         uint16_t port_id;
2682         uint32_t fifo;
2683
2684         if (!ptp)
2685                 return -ENODEV;
2686
2687         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2688                                 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
2689         if (!(fifo & BNXT_PTP_RX_FIFO_PENDING))
2690                 return -EAGAIN;
2691
2692         port_id = pf->port_id;
2693         rte_write32(rte_cpu_to_le_32(1 << port_id), (uint8_t *)bp->bar0 +
2694                     ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]);
2695
2696         fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2697                                    ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
2698         if (fifo & BNXT_PTP_RX_FIFO_PENDING) {
2699 /*              bnxt_clr_rx_ts(bp);       TBD  */
2700                 return -EBUSY;
2701         }
2702
2703         *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2704                                 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]));
2705         *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
2706                                 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32;
2707
2708         return 0;
2709 }
2710
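/*
 * IEEE 1588 eth_dev callbacks.  The hardware counter is never written;
 * absolute time and adjustments are kept in software rte_timecounters
 * layered on top of the free-running counter read by bnxt_cc_read().
 */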
2711 static int
2712 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
2713 {
2714         uint64_t ns;
2715         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2716         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2717
2718         if (!ptp)
2719                 return 0;
2720
2721         ns = rte_timespec_to_ns(ts);
2722         /* Set the timecounters to a new value. */
2723         ptp->tc.nsec = ns;
2724
2725         return 0;
2726 }
2727
2728 static int
2729 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
2730 {
2731         uint64_t ns, systime_cycles;
2732         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2733         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2734
2735         if (!ptp)
2736                 return 0;
2737
2738         systime_cycles = bnxt_cc_read(bp);
2739         ns = rte_timecounter_update(&ptp->tc, systime_cycles);
2740         *ts = rte_ns_to_timespec(ns);
2741
2742         return 0;
2743 }
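
/* Enable PTP: request Rx filtering and Tx timestamping from firmware via
 * HWRM, map the PTP registers on success, and reset the software
 * timecounters.
 */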
2744 static int
2745 bnxt_timesync_enable(struct rte_eth_dev *dev)
2746 {
2747         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2748         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2749         uint32_t shift = 0;
2750
2751         if (!ptp)
2752                 return 0;
2753
2754         ptp->rx_filter = 1;
2755         ptp->tx_tstamp_en = 1;
2756         ptp->rxctl = BNXT_PTP_MSG_EVENTS;
2757
2758         if (!bnxt_hwrm_ptp_cfg(bp))
2759                 bnxt_map_ptp_regs(bp);
2760
2761         memset(&ptp->tc, 0, sizeof(struct rte_timecounter));
2762         memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2763         memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
2764
2765         ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
2766         ptp->tc.cc_shift = shift;
2767         ptp->tc.nsec_mask = (1ULL << shift) - 1;
2768
2769         ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
2770         ptp->rx_tstamp_tc.cc_shift = shift;
2771         ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2772
2773         ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
2774         ptp->tx_tstamp_tc.cc_shift = shift;
2775         ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
2776
2777         return 0;
2778 }
2779
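/* Disable PTP timestamping in firmware and release the GRC windows. */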
2780 static int
2781 bnxt_timesync_disable(struct rte_eth_dev *dev)
2782 {
2783         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2784         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2785
2786         if (!ptp)
2787                 return 0;
2788
2789         ptp->rx_filter = 0;
2790         ptp->tx_tstamp_en = 0;
2791         ptp->rxctl = 0;
2792
2793         bnxt_hwrm_ptp_cfg(bp);
2794
2795         bnxt_unmap_ptp_regs(bp);
2796
2797         return 0;
2798 }
2799
2800 static int
2801 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
2802                                  struct timespec *timestamp,
2803                                  uint32_t flags __rte_unused)
2804 {
2805         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2806         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2807         uint64_t rx_tstamp_cycles = 0;
2808         uint64_t ns;
2809
2810         if (!ptp)
2811                 return 0;
2812
2813         bnxt_get_rx_ts(bp, &rx_tstamp_cycles);
2814         ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles);
2815         *timestamp = rte_ns_to_timespec(ns);
2816         return  0;
2817 }
2818
2819 static int
2820 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
2821                                  struct timespec *timestamp)
2822 {
2823         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2824         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2825         uint64_t tx_tstamp_cycles = 0;
2826         uint64_t ns;
2827
2828         if (!ptp)
2829                 return 0;
2830
2831         bnxt_get_tx_ts(bp, &tx_tstamp_cycles);
2832         ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles);
2833         *timestamp = rte_ns_to_timespec(ns);
2834
2835         return 0;
2836 }
2837
2838 static int
2839 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
2840 {
2841         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2842         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2843
2844         if (!ptp)
2845                 return 0;
2846
2847         ptp->tc.nsec += delta;
2848
2849         return 0;
2850 }
2851
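/* EEPROM size is reported as directory-entry count times entry length, as
 * returned by the HWRM NVM get-dir-info command.
 */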
2852 static int
2853 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
2854 {
2855         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2856         int rc;
2857         uint32_t dir_entries;
2858         uint32_t entry_length;
2859
2860         PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x\n",
2861                 bp->pdev->addr.domain, bp->pdev->addr.bus,
2862                 bp->pdev->addr.devid, bp->pdev->addr.function);
2863
2864         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
2865         if (rc != 0)
2866                 return rc;
2867
2868         return dir_entries * entry_length;
2869 }
2870
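/*
 * EEPROM read.  offset == 0 returns the NVM directory itself; otherwise the
 * top byte of the offset selects a directory entry (1-based) and the low
 * 24 bits give the offset within that item.
 */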
2871 static int
2872 bnxt_get_eeprom_op(struct rte_eth_dev *dev,
2873                 struct rte_dev_eeprom_info *in_eeprom)
2874 {
2875         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2876         uint32_t index;
2877         uint32_t offset;
2878
2879         PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
2880                 "len = %d\n", bp->pdev->addr.domain,
2881                 bp->pdev->addr.bus, bp->pdev->addr.devid,
2882                 bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
2883
2884         if (in_eeprom->offset == 0) /* special offset value to get directory */
2885                 return bnxt_get_nvram_directory(bp, in_eeprom->length,
2886                                                 in_eeprom->data);
2887
2888         index = in_eeprom->offset >> 24;
2889         offset = in_eeprom->offset & 0xffffff;
2890
2891         if (index != 0)
2892                 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset,
2893                                            in_eeprom->length, in_eeprom->data);
2894
2895         return 0;
2896 }
2897
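/* NVM directory types holding firmware or other executable images; writes to
 * these are rejected by bnxt_set_eeprom_op() below.
 */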
2898 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
2899 {
2900         switch (dir_type) {
2901         case BNX_DIR_TYPE_CHIMP_PATCH:
2902         case BNX_DIR_TYPE_BOOTCODE:
2903         case BNX_DIR_TYPE_BOOTCODE_2:
2904         case BNX_DIR_TYPE_APE_FW:
2905         case BNX_DIR_TYPE_APE_PATCH:
2906         case BNX_DIR_TYPE_KONG_FW:
2907         case BNX_DIR_TYPE_KONG_PATCH:
2908         case BNX_DIR_TYPE_BONO_FW:
2909         case BNX_DIR_TYPE_BONO_PATCH:
2910                 return true;
2911         }
2912
2913         return false;
2914 }
2915
2916 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
2917 {
2918         switch (dir_type) {
2919         case BNX_DIR_TYPE_AVS:
2920         case BNX_DIR_TYPE_EXP_ROM_MBA:
2921         case BNX_DIR_TYPE_PCIE:
2922         case BNX_DIR_TYPE_TSCF_UCODE:
2923         case BNX_DIR_TYPE_EXT_PHY:
2924         case BNX_DIR_TYPE_CCM:
2925         case BNX_DIR_TYPE_ISCSI_BOOT:
2926         case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
2927         case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
2928                 return true;
2929         }
2930
2931         return false;
2932 }
2933
2934 static bool bnxt_dir_type_is_executable(uint16_t dir_type)
2935 {
2936         return bnxt_dir_type_is_ape_bin_format(dir_type) ||
2937                 bnxt_dir_type_is_other_exec_format(dir_type);
2938 }
2939
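/*
 * EEPROM write, PF only.  A magic with 0xffff in its top 16 bits selects a
 * directory operation (currently only erase of the entry named by the low
 * byte); otherwise magic/offset carry the item type, extension, ordinal and
 * attributes for an HWRM flash write.
 */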
2940 static int
2941 bnxt_set_eeprom_op(struct rte_eth_dev *dev,
2942                 struct rte_dev_eeprom_info *in_eeprom)
2943 {
2944         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
2945         uint8_t index, dir_op;
2946         uint16_t type, ext, ordinal, attr;
2947
2948         PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
2949                 "len = %d\n", bp->pdev->addr.domain,
2950                 bp->pdev->addr.bus, bp->pdev->addr.devid,
2951                 bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
2952
2953         if (!BNXT_PF(bp)) {
2954                 PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n");
2955                 return -EINVAL;
2956         }
2957
2958         type = in_eeprom->magic >> 16;
2959
2960         if (type == 0xffff) { /* special value for directory operations */
2961                 index = in_eeprom->magic & 0xff;
2962                 dir_op = in_eeprom->magic >> 8;
2963                 if (index == 0)
2964                         return -EINVAL;
2965                 switch (dir_op) {
2966                 case 0x0e: /* erase */
2967                         if (in_eeprom->offset != ~in_eeprom->magic)
2968                                 return -EINVAL;
2969                         return bnxt_hwrm_erase_nvram_directory(bp, index - 1);
2970                 default:
2971                         return -EINVAL;
2972                 }
2973         }
2974
2975         /* Create or re-write an NVM item: */
2976         if (bnxt_dir_type_is_executable(type))
2977                 return -EOPNOTSUPP;
2978         ext = in_eeprom->magic & 0xffff;
2979         ordinal = in_eeprom->offset >> 16;
2980         attr = in_eeprom->offset & 0xffff;
2981
2982         return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
2983                                      in_eeprom->data, in_eeprom->length);
2985 }
2986
2987 /*
2988  * Initialization
2989  */
2990
2991 static const struct eth_dev_ops bnxt_dev_ops = {
2992         .dev_infos_get = bnxt_dev_info_get_op,
2993         .dev_close = bnxt_dev_close_op,
2994         .dev_configure = bnxt_dev_configure_op,
2995         .dev_start = bnxt_dev_start_op,
2996         .dev_stop = bnxt_dev_stop_op,
2997         .dev_set_link_up = bnxt_dev_set_link_up_op,
2998         .dev_set_link_down = bnxt_dev_set_link_down_op,
2999         .stats_get = bnxt_stats_get_op,
3000         .stats_reset = bnxt_stats_reset_op,
3001         .rx_queue_setup = bnxt_rx_queue_setup_op,
3002         .rx_queue_release = bnxt_rx_queue_release_op,
3003         .tx_queue_setup = bnxt_tx_queue_setup_op,
3004         .tx_queue_release = bnxt_tx_queue_release_op,
3005         .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op,
3006         .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op,
3007         .reta_update = bnxt_reta_update_op,
3008         .reta_query = bnxt_reta_query_op,
3009         .rss_hash_update = bnxt_rss_hash_update_op,
3010         .rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
3011         .link_update = bnxt_link_update_op,
3012         .promiscuous_enable = bnxt_promiscuous_enable_op,
3013         .promiscuous_disable = bnxt_promiscuous_disable_op,
3014         .allmulticast_enable = bnxt_allmulticast_enable_op,
3015         .allmulticast_disable = bnxt_allmulticast_disable_op,
3016         .mac_addr_add = bnxt_mac_addr_add_op,
3017         .mac_addr_remove = bnxt_mac_addr_remove_op,
3018         .flow_ctrl_get = bnxt_flow_ctrl_get_op,
3019         .flow_ctrl_set = bnxt_flow_ctrl_set_op,
3020         .udp_tunnel_port_add  = bnxt_udp_tunnel_port_add_op,
3021         .udp_tunnel_port_del  = bnxt_udp_tunnel_port_del_op,
3022         .vlan_filter_set = bnxt_vlan_filter_set_op,
3023         .vlan_offload_set = bnxt_vlan_offload_set_op,
3024         .vlan_pvid_set = bnxt_vlan_pvid_set_op,
3025         .mtu_set = bnxt_mtu_set_op,
3026         .mac_addr_set = bnxt_set_default_mac_addr_op,
3027         .xstats_get = bnxt_dev_xstats_get_op,
3028         .xstats_get_names = bnxt_dev_xstats_get_names_op,
3029         .xstats_reset = bnxt_dev_xstats_reset_op,
3030         .fw_version_get = bnxt_fw_version_get,
3031         .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
3032         .rxq_info_get = bnxt_rxq_info_get_op,
3033         .txq_info_get = bnxt_txq_info_get_op,
3034         .dev_led_on = bnxt_dev_led_on_op,
3035         .dev_led_off = bnxt_dev_led_off_op,
3036         .xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
3037         .xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
3038         .rx_queue_count = bnxt_rx_queue_count_op,
3039         .rx_descriptor_status = bnxt_rx_descriptor_status_op,
3040         .tx_descriptor_status = bnxt_tx_descriptor_status_op,
3041         .rx_queue_start = bnxt_rx_queue_start,
3042         .rx_queue_stop = bnxt_rx_queue_stop,
3043         .tx_queue_start = bnxt_tx_queue_start,
3044         .tx_queue_stop = bnxt_tx_queue_stop,
3045         .filter_ctrl = bnxt_filter_ctrl_op,
3046         .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
3047         .get_eeprom_length    = bnxt_get_eeprom_length_op,
3048         .get_eeprom           = bnxt_get_eeprom_op,
3049         .set_eeprom           = bnxt_set_eeprom_op,
3050         .timesync_enable      = bnxt_timesync_enable,
3051         .timesync_disable     = bnxt_timesync_disable,
3052         .timesync_read_time   = bnxt_timesync_read_time,
3053         .timesync_write_time   = bnxt_timesync_write_time,
3054         .timesync_adjust_time = bnxt_timesync_adjust_time,
3055         .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp,
3056         .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp,
3057 };
3058
3059 static bool bnxt_vf_pciid(uint16_t id)
3060 {
3061         if (id == BROADCOM_DEV_ID_57304_VF ||
3062             id == BROADCOM_DEV_ID_57406_VF ||
3063             id == BROADCOM_DEV_ID_5731X_VF ||
3064             id == BROADCOM_DEV_ID_5741X_VF ||
3065             id == BROADCOM_DEV_ID_57414_VF ||
3066             id == BROADCOM_DEV_ID_STRATUS_NIC_VF)
3067                 return true;
3068         return false;
3069 }
3070
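/* Minimal board bring-up: verify that BAR0 was mapped by the PCI bus layer
 * and record the register base in the per-port private data.
 */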
3071 static int bnxt_init_board(struct rte_eth_dev *eth_dev)
3072 {
3073         struct bnxt *bp = eth_dev->data->dev_private;
3074         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
3075         int rc;
3076
3077         /* enable device (incl. PCI PM wakeup), and bus-mastering */
3078         if (!pci_dev->mem_resource[0].addr) {
3079                 PMD_DRV_LOG(ERR,
3080                         "Cannot find PCI device base address, aborting\n");
3081                 rc = -ENODEV;
3082                 goto init_err_disable;
3083         }
3084
3085         bp->eth_dev = eth_dev;
3086         bp->pdev = pci_dev;
3087
3088         bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
3089         if (!bp->bar0) {
3090                 PMD_DRV_LOG(ERR, "Cannot map device registers, aborting\n");
3091                 rc = -ENOMEM;
3092                 goto init_err_release;
3093         }
3094         return 0;
3095
3096 init_err_release:
3097         if (bp->bar0)
3098                 bp->bar0 = NULL;
3099
3100 init_err_disable:
3101
3102         return rc;
3103 }
3104
3105 static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
3106
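/* Clear a command's bit in the VF request-forwarding bitmap so VFs can issue
 * that HWRM command without it being forwarded to the PF driver.
 */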
3107 #define ALLOW_FUNC(x)   \
3108         { \
3109                 typeof(x) arg = (x); \
3110                 bp->pf.vf_req_fwd[((arg) >> 5)] &= \
3111                 ~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
3112         }
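
/*
 * Per-port init: map BAR0, reserve the port-statistics memzones on PFs,
 * negotiate with firmware over HWRM, query function resources and the
 * permanent MAC address, register the driver, reset the function and set up
 * interrupts and the default completion ring.
 */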
3113 static int
3114 bnxt_dev_init(struct rte_eth_dev *eth_dev)
3115 {
3116         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
3117         char mz_name[RTE_MEMZONE_NAMESIZE];
3118         const struct rte_memzone *mz = NULL;
3119         static int version_printed;
3120         uint32_t total_alloc_len;
3121         rte_iova_t mz_phys_addr;
3122         struct bnxt *bp;
3123         int rc;
3124
3125         if (version_printed++ == 0)
3126                 PMD_DRV_LOG(INFO, "%s\n", bnxt_version);
3127
3128         rte_eth_copy_pci_info(eth_dev, pci_dev);
3129
3130         bp = eth_dev->data->dev_private;
3131
3132         bp->dev_stopped = 1;
3133
3134         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3135                 goto skip_init;
3136
3137         if (bnxt_vf_pciid(pci_dev->id.device_id))
3138                 bp->flags |= BNXT_FLAG_VF;
3139
3140         rc = bnxt_init_board(eth_dev);
3141         if (rc) {
3142                 PMD_DRV_LOG(ERR,
3143                         "Board initialization failed rc: %x\n", rc);
3144                 goto error;
3145         }
3146 skip_init:
3147         eth_dev->dev_ops = &bnxt_dev_ops;
3148         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3149                 return 0;
3150         eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
3151         eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
3152
3153         if (BNXT_PF(bp) && pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
3154                 snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
3155                          "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
3156                          pci_dev->addr.bus, pci_dev->addr.devid,
3157                          pci_dev->addr.function, "rx_port_stats");
3158                 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
3159                 mz = rte_memzone_lookup(mz_name);
3160                 total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
3161                                 sizeof(struct rx_port_stats) + 512);
3162                 if (!mz) {
3163                         mz = rte_memzone_reserve(mz_name, total_alloc_len,
3164                                         SOCKET_ID_ANY,
3165                                         RTE_MEMZONE_2MB |
3166                                         RTE_MEMZONE_SIZE_HINT_ONLY |
3167                                         RTE_MEMZONE_IOVA_CONTIG);
3168                         if (mz == NULL)
3169                                 return -ENOMEM;
3170                 }
3171                 memset(mz->addr, 0, mz->len);
3172                 mz_phys_addr = mz->iova;
3173                 if ((unsigned long)mz->addr == mz_phys_addr) {
3174                         PMD_DRV_LOG(WARNING,
3175                                 "Memzone physical address same as virtual.\n");
3176                         PMD_DRV_LOG(WARNING,
3177                                 "Using rte_mem_virt2iova()\n");
3178                         mz_phys_addr = rte_mem_virt2iova(mz->addr);
3179                         if (mz_phys_addr == 0) {
3180                                 PMD_DRV_LOG(ERR,
3181                                 "unable to map address to physical memory\n");
3182                                 return -ENOMEM;
3183                         }
3184                 }
3185
3186                 bp->rx_mem_zone = (const void *)mz;
3187                 bp->hw_rx_port_stats = mz->addr;
3188                 bp->hw_rx_port_stats_map = mz_phys_addr;
3189
3190                 snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
3191                          "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
3192                          pci_dev->addr.bus, pci_dev->addr.devid,
3193                          pci_dev->addr.function, "tx_port_stats");
3194                 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
3195                 mz = rte_memzone_lookup(mz_name);
3196                 total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
3197                                 sizeof(struct tx_port_stats) + 512);
3198                 if (!mz) {
3199                         mz = rte_memzone_reserve(mz_name,
3200                                         total_alloc_len,
3201                                         SOCKET_ID_ANY,
3202                                         RTE_MEMZONE_2MB |
3203                                         RTE_MEMZONE_SIZE_HINT_ONLY |
3204                                         RTE_MEMZONE_IOVA_CONTIG);
3205                         if (mz == NULL)
3206                                 return -ENOMEM;
3207                 }
3208                 memset(mz->addr, 0, mz->len);
3209                 mz_phys_addr = mz->iova;
3210                 if ((unsigned long)mz->addr == mz_phys_addr) {
3211                         PMD_DRV_LOG(WARNING,
3212                                 "Memzone physical address same as virtual.\n");
3213                         PMD_DRV_LOG(WARNING,
3214                                 "Using rte_mem_virt2iova()\n");
3215                         mz_phys_addr = rte_mem_virt2iova(mz->addr);
3216                         if (mz_phys_addr == 0) {
3217                                 PMD_DRV_LOG(ERR,
3218                                 "unable to map address to physical memory\n");
3219                                 return -ENOMEM;
3220                         }
3221                 }
3222
3223                 bp->tx_mem_zone = (const void *)mz;
3224                 bp->hw_tx_port_stats = mz->addr;
3225                 bp->hw_tx_port_stats_map = mz_phys_addr;
3226
3227                 bp->flags |= BNXT_FLAG_PORT_STATS;
3228         }
3229
3230         rc = bnxt_alloc_hwrm_resources(bp);
3231         if (rc) {
3232                 PMD_DRV_LOG(ERR,
3233                         "hwrm resource allocation failure rc: %x\n", rc);
3234                 goto error_free;
3235         }
3236         rc = bnxt_hwrm_ver_get(bp);
3237         if (rc)
3238                 goto error_free;
3239         rc = bnxt_hwrm_queue_qportcfg(bp);
3240         if (rc) {
3241                 PMD_DRV_LOG(ERR, "hwrm queue qportcfg failed\n");
3242                 goto error_free;
3243         }
3244
3245         rc = bnxt_hwrm_func_qcfg(bp);
3246         if (rc) {
3247                 PMD_DRV_LOG(ERR, "hwrm func qcfg failed\n");
3248                 goto error_free;
3249         }
3250
3251         /* Get the MAX capabilities for this function */
3252         rc = bnxt_hwrm_func_qcaps(bp);
3253         if (rc) {
3254                 PMD_DRV_LOG(ERR, "hwrm query capability failure rc: %x\n", rc);
3255                 goto error_free;
3256         }
3257         if (bp->max_tx_rings == 0) {
3258                 PMD_DRV_LOG(ERR, "No TX rings available!\n");
3259                 rc = -EBUSY;
3260                 goto error_free;
3261         }
3262         eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
3263                                         ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
3264         if (eth_dev->data->mac_addrs == NULL) {
3265                 PMD_DRV_LOG(ERR,
3266                         "Failed to alloc %u bytes needed to store MAC addr tbl",
3267                         ETHER_ADDR_LEN * bp->max_l2_ctx);
3268                 rc = -ENOMEM;
3269                 goto error_free;
3270         }
3271
3272         if (check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) {
3273                 PMD_DRV_LOG(ERR,
3274                             "Invalid MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
3275                             bp->dflt_mac_addr[0], bp->dflt_mac_addr[1],
3276                             bp->dflt_mac_addr[2], bp->dflt_mac_addr[3],
3277                             bp->dflt_mac_addr[4], bp->dflt_mac_addr[5]);
3278                 rc = -EINVAL;
3279                 goto error_free;
3280         }
3281         /* Copy the permanent MAC from the qcap response address now. */
3282         memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
3283         memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
3284
3285         if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
3286                 /* 1 ring is for default completion ring */
3287                 PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
3288                 rc = -ENOSPC;
3289                 goto error_free;
3290         }
3291
3292         bp->grp_info = rte_zmalloc("bnxt_grp_info",
3293                                 sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
3294         if (!bp->grp_info) {
3295                 PMD_DRV_LOG(ERR,
3296                         "Failed to alloc %zu bytes to store group info table\n",
3297                         sizeof(*bp->grp_info) * bp->max_ring_grps);
3298                 rc = -ENOMEM;
3299                 goto error_free;
3300         }
3301
3302         /*
         * Forward all requests if firmware is new enough.  The check below
         * treats fw_ver as major << 24 | minor << 16 | build << 8, so it
         * enables forwarding for 20.6.100 <= fw < 20.7.0 and for fw >= 20.8.0.
         */
3303         if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
3304             (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
3305             ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
3306                 memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
3307         } else {
3308                 PMD_DRV_LOG(WARNING,
3309                         "Firmware too old for VF mailbox functionality\n");
3310                 memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
3311         }
3312
3313         /*
3314          * The following are used for driver cleanup.  If we disallow these,
3315          * VF drivers can't clean up cleanly.
3316          */
3317         ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
3318         ALLOW_FUNC(HWRM_VNIC_FREE);
3319         ALLOW_FUNC(HWRM_RING_FREE);
3320         ALLOW_FUNC(HWRM_RING_GRP_FREE);
3321         ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
3322         ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
3323         ALLOW_FUNC(HWRM_STAT_CTX_FREE);
3324         ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
3325         ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
3326         rc = bnxt_hwrm_func_driver_register(bp);
3327         if (rc) {
3328                 PMD_DRV_LOG(ERR, "Failed to register driver\n");
3330                 rc = -EBUSY;
3331                 goto error_free;
3332         }
3333
3334         PMD_DRV_LOG(INFO,
3335                 DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %p\n",
3336                 pci_dev->mem_resource[0].phys_addr,
3337                 pci_dev->mem_resource[0].addr);
3338
3339         rc = bnxt_hwrm_func_reset(bp);
3340         if (rc) {
3341                 PMD_DRV_LOG(ERR, "hwrm chip reset failure rc: %x\n", rc);
3342                 rc = -EIO;
3343                 goto error_free;
3344         }
3345
3346         if (BNXT_PF(bp)) {
3347                 //if (bp->pf.active_vfs) {
3348                         // TODO: Deallocate VF resources?
3349                 //}
3350                 if (bp->pdev->max_vfs) {
3351                         rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
3352                         if (rc) {
3353                                 PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
3354                                 goto error_free;
3355                         }
3356                 } else {
3357                         rc = bnxt_hwrm_allocate_pf_only(bp);
3358                         if (rc) {
3359                                 PMD_DRV_LOG(ERR,
3360                                         "Failed to allocate PF resources\n");
3361                                 goto error_free;
3362                         }
3363                 }
3364         }
3365
3366         bnxt_hwrm_port_led_qcaps(bp);
3367
3368         rc = bnxt_setup_int(bp);
3369         if (rc)
3370                 goto error_free;
3371
3372         rc = bnxt_alloc_mem(bp);
3373         if (rc)
3374                 goto error_free_int;
3375
3376         rc = bnxt_request_int(bp);
3377         if (rc)
3378                 goto error_free_int;
3379
3380         rc = bnxt_alloc_def_cp_ring(bp);
3381         if (rc)
3382                 goto error_free_int;
3383
3384         bnxt_enable_int(bp);
3385
3386         return 0;
3387
3388 error_free_int:
3389         bnxt_disable_int(bp);
3390         bnxt_free_def_cp_ring(bp);
3391         bnxt_hwrm_func_buf_unrgtr(bp);
3392         bnxt_free_int(bp);
3393         bnxt_free_mem(bp);
3394 error_free:
3395         bnxt_dev_uninit(eth_dev);
3396 error:
3397         return rc;
3398 }
3399
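/* Undo bnxt_dev_init(): disable interrupts, free memory, the MAC table and
 * group info, unregister from firmware and release the HWRM resources and
 * statistics memzones.
 */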
3400 static int
3401 bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
{
3402         struct bnxt *bp = eth_dev->data->dev_private;
3403         int rc;
3404
3405         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3406                 return -EPERM;
3407
3408         bnxt_disable_int(bp);
3409         bnxt_free_int(bp);
3410         bnxt_free_mem(bp);
3411         if (eth_dev->data->mac_addrs != NULL) {
3412                 rte_free(eth_dev->data->mac_addrs);
3413                 eth_dev->data->mac_addrs = NULL;
3414         }
3415         if (bp->grp_info != NULL) {
3416                 rte_free(bp->grp_info);
3417                 bp->grp_info = NULL;
3418         }
3419         rc = bnxt_hwrm_func_driver_unregister(bp, 0);
3420         bnxt_free_hwrm_resources(bp);
3421         rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
3422         rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
3423         if (bp->dev_stopped == 0)
3424                 bnxt_dev_close_op(eth_dev);
3425         if (bp->pf.vf_info)
3426                 rte_free(bp->pf.vf_info);
3427         eth_dev->dev_ops = NULL;
3428         eth_dev->rx_pkt_burst = NULL;
3429         eth_dev->tx_pkt_burst = NULL;
3430
3431         return rc;
3432 }
3433
3434 static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3435         struct rte_pci_device *pci_dev)
3436 {
3437         return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt),
3438                 bnxt_dev_init);
3439 }
3440
3441 static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
3442 {
3443         return rte_eth_dev_pci_generic_remove(pci_dev, bnxt_dev_uninit);
3444 }
3445
3446 static struct rte_pci_driver bnxt_rte_pmd = {
3447         .id_table = bnxt_pci_id_map,
3448         .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
3449                 RTE_PCI_DRV_INTR_LSC,
3450         .probe = bnxt_pci_probe,
3451         .remove = bnxt_pci_remove,
3452 };
3453
3454 static bool
3455 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
3456 {
3457         if (strcmp(dev->device->driver->name, drv->driver.name))
3458                 return false;
3459
3460         return true;
3461 }
3462
3463 bool is_bnxt_supported(struct rte_eth_dev *dev)
3464 {
3465         return is_device_supported(dev, &bnxt_rte_pmd);
3466 }
3467
3468 RTE_INIT(bnxt_init_log);
3469 static void
3470 bnxt_init_log(void)
3471 {
3472         bnxt_logtype_driver = rte_log_register("pmd.bnxt.driver");
3473         if (bnxt_logtype_driver >= 0)
3474                 rte_log_set_level(bnxt_logtype_driver, RTE_LOG_INFO);
3475 }
3476
3477 RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
3478 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
3479 RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");