net/bnxt: support NIC Partitioning
[dpdk.git] / drivers / net / bnxt / bnxt_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) Broadcom Limited.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Broadcom Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <inttypes.h>
35 #include <stdbool.h>
36
37 #include <rte_dev.h>
38 #include <rte_ethdev.h>
39 #include <rte_malloc.h>
40 #include <rte_cycles.h>
41
42 #include "bnxt.h"
43 #include "bnxt_cpr.h"
44 #include "bnxt_filter.h"
45 #include "bnxt_hwrm.h"
46 #include "bnxt_ring.h"
47 #include "bnxt_rxq.h"
48 #include "bnxt_rxr.h"
49 #include "bnxt_stats.h"
50 #include "bnxt_txq.h"
51 #include "bnxt_txr.h"
52 #include "bnxt_vnic.h"
53 #include "hsi_struct_def_dpdk.h"
54
#define DRV_MODULE_NAME         "bnxt"

/* Version banner logged when the driver registers. */
static const char bnxt_version[] =
	"Broadcom Cumulus driver " DRV_MODULE_NAME "\n";

#define PCI_VENDOR_ID_BROADCOM 0x14E4

/* PCI device IDs of the supported NetXtreme-C/E adapters.
 * PF = physical function, VF = virtual function,
 * MF = multi-function (NIC partitioning / NPAR).
 */
#define BROADCOM_DEV_ID_57301 0x16c8
#define BROADCOM_DEV_ID_57302 0x16c9
#define BROADCOM_DEV_ID_57304_PF 0x16ca
#define BROADCOM_DEV_ID_57304_VF 0x16cb
#define BROADCOM_DEV_ID_NS2 0x16cd
#define BROADCOM_DEV_ID_57402 0x16d0
#define BROADCOM_DEV_ID_57404 0x16d1
#define BROADCOM_DEV_ID_57406_PF 0x16d2
#define BROADCOM_DEV_ID_57406_VF 0x16d3
#define BROADCOM_DEV_ID_57406_MF 0x16d4
#define BROADCOM_DEV_ID_57314 0x16df

/* PCI probe table: one entry per supported device ID, terminated by a
 * zeroed sentinel entry.
 */
static struct rte_pci_id bnxt_pci_id_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
	{ .vendor_id = 0, /* sentinel */ },
};

/* RSS hash types this driver can translate to HWRM hash types; see
 * bnxt_rss_hash_update_op().
 */
#define BNXT_ETH_RSS_SUPPORT (	\
	ETH_RSS_IPV4 |		\
	ETH_RSS_NONFRAG_IPV4_TCP |	\
	ETH_RSS_NONFRAG_IPV4_UDP |	\
	ETH_RSS_IPV6 |		\
	ETH_RSS_NONFRAG_IPV6_TCP |	\
	ETH_RSS_NONFRAG_IPV6_UDP)
95
96 /***********************/
97
98 /*
99  * High level utility functions
100  */
101
/*
 * Release all driver-private memory: filters, VNIC attributes/state,
 * statistics and every ring.  Teardown order mirrors the reverse of
 * bnxt_alloc_mem() (filters hang off VNICs, so they go first; the
 * default completion ring goes last).
 */
static void bnxt_free_mem(struct bnxt *bp)
{
	bnxt_free_filter_mem(bp);
	bnxt_free_vnic_attributes(bp);
	bnxt_free_vnic_mem(bp);

	bnxt_free_stats(bp);
	bnxt_free_tx_rings(bp);
	bnxt_free_rx_rings(bp);
	bnxt_free_def_cp_ring(bp);
}
113
/*
 * Allocate all driver-private memory needed before the chip can be
 * programmed: the default completion ring, VNIC state and attributes,
 * and the filter pool.  On any failure everything allocated so far is
 * released via bnxt_free_mem().
 * Returns 0 on success or the first allocator's error code.
 */
static int bnxt_alloc_mem(struct bnxt *bp)
{
	int rc;

	/* Default completion ring */
	rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_rings(bp, 0, NULL, NULL,
			      bp->def_cp_ring, "def_cp");
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_filter_mem(bp);
	if (rc)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	/* bnxt_free_mem() tolerates partially-initialized state. */
	bnxt_free_mem(bp);
	return rc;
}
146
/*
 * Program the chip for operation: allocate HWRM stat contexts, rings
 * and ring groups, configure RX multi-queue mode, then for every VNIC
 * allocate and configure it (context, MAC filters, RSS table), and
 * finally set the L2 RX mask on the default VNIC.
 * On any failure all HWRM resources allocated so far are released and
 * the firmware error code is returned; returns 0 on success.
 */
static int bnxt_init_chip(struct bnxt *bp)
{
	unsigned int i, rss_idx, fw_idx;
	int rc;

	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_hwrm_rings(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_mq_rx_configure(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc);
		goto err_out;
	}

	/* VNIC configuration */
	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		rc = bnxt_hwrm_vnic_alloc(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD, "HWRM vnic alloc failure rc: %x\n",
				rc);
			goto err_out;
		}

		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD,
				"HWRM vnic ctx alloc failure rc: %x\n", rc);
			goto err_out;
		}

		rc = bnxt_hwrm_vnic_cfg(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD, "HWRM vnic cfg failure rc: %x\n", rc);
			goto err_out;
		}

		rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
		if (rc) {
			RTE_LOG(ERR, PMD, "HWRM vnic filter failure rc: %x\n",
				rc);
			goto err_out;
		}
		if (vnic->rss_table && vnic->hash_type) {
			/*
			 * Fill the RSS hash & redirection table with
			 * ring group ids for all VNICs
			 */
			for (rss_idx = 0, fw_idx = 0;
			     rss_idx < HW_HASH_INDEX_SIZE;
			     rss_idx++, fw_idx++) {
				/* Wrap fw_idx back to the first ring group
				 * once we run past the populated entries.
				 * NOTE(review): if fw_grp_ids[0] is itself
				 * INVALID_HW_RING_ID, an invalid id is still
				 * written here -- confirm that cannot occur.
				 */
				if (vnic->fw_grp_ids[fw_idx] ==
				    INVALID_HW_RING_ID)
					fw_idx = 0;
				vnic->rss_table[rss_idx] =
						vnic->fw_grp_ids[fw_idx];
			}
			rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
			if (rc) {
				RTE_LOG(ERR, PMD,
					"HWRM vnic set RSS failure rc: %x\n",
					rc);
				goto err_out;
			}
		}
	}
	/* Promisc/allmulti/broadcast mask on the default VNIC. */
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0]);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"HWRM cfa l2 rx mask failure rc: %x\n", rc);
		goto err_out;
	}

	return 0;

err_out:
	bnxt_free_all_hwrm_resources(bp);

	return rc;
}
243
/*
 * Undo bnxt_init_nic()/bnxt_init_chip(): release all HWRM resources,
 * then tear down every filter and VNIC.  Always returns 0.
 */
static int bnxt_shutdown_nic(struct bnxt *bp)
{
	bnxt_free_all_hwrm_resources(bp);
	bnxt_free_all_filters(bp);
	bnxt_free_all_vnics(bp);
	return 0;
}
251
/*
 * Initialize the software state (ring groups, VNICs, filters) and then
 * program the chip.  Returns 0 on success or the bnxt_init_chip()
 * error code.
 */
static int bnxt_init_nic(struct bnxt *bp)
{
	bnxt_init_ring_grps(bp);
	bnxt_init_vnics(bp);
	bnxt_init_filters(bp);

	return bnxt_init_chip(bp);
}
266
267 /*
268  * Device configuration and status function
269  */
270
/*
 * Report device capabilities to the application.  Queue/VF/RETA limits
 * come from the PF or VF resource counts cached from firmware; offload
 * capabilities and default ring thresholds are fixed by the driver.
 */
static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
				  struct rte_eth_dev_info *dev_info)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint16_t max_vnics, i, j, vpool, vrxq;

	/* MAC Specifics */
	dev_info->max_mac_addrs = MAX_NUM_MAC_ADDR;
	dev_info->max_hash_mac_addrs = 0;

	/* PF/VF specifics */
	if (BNXT_PF(bp)) {
		dev_info->max_rx_queues = bp->pf.max_rx_rings;
		dev_info->max_tx_queues = bp->pf.max_tx_rings;
		dev_info->max_vfs = bp->pf.active_vfs;
		dev_info->reta_size = bp->pf.max_rsscos_ctx;
		max_vnics = bp->pf.max_vnics;
	} else {
		dev_info->max_rx_queues = bp->vf.max_rx_rings;
		dev_info->max_tx_queues = bp->vf.max_tx_rings;
		dev_info->reta_size = bp->vf.max_rsscos_ctx;
		max_vnics = bp->vf.max_vnics;
	}

	/* Fast path specifics */
	dev_info->min_rx_bufsize = 1;
	dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
				  + VLAN_TAG_SIZE;
	dev_info->rx_offload_capa = 0;
	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
					DEV_TX_OFFLOAD_TCP_CKSUM |
					DEV_TX_OFFLOAD_UDP_CKSUM |
					DEV_TX_OFFLOAD_TCP_TSO;

	/* *INDENT-OFF* */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = 8,
			.hthresh = 8,
			.wthresh = 0,
		},
		.rx_free_thresh = 32,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = 32,
			.hthresh = 0,
			.wthresh = 0,
		},
		.tx_free_thresh = 32,
		.tx_rs_thresh = 32,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOOFFLOADS,
	};
	/* *INDENT-ON* */

	/*
	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
	 *       need further investigation.
	 */

	/* VMDq resources */
	/* Search for the largest pool count (64, 32, 16, 8) and queue
	 * count (128 halving down to 8) combination that the available
	 * VNICs and RX rings can actually back; if none fits, report no
	 * VMDq capability at all.
	 */
	vpool = 64; /* ETH_64_POOLS */
	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
	for (i = 0; i < 4; vpool >>= 1, i++) {
		if (max_vnics > vpool) {
			for (j = 0; j < 5; vrxq >>= 1, j++) {
				if (dev_info->max_rx_queues > vrxq) {
					if (vpool > vrxq)
						vpool = vrxq;
					goto found;
				}
			}
			/* Not enough resources to support VMDq */
			break;
		}
	}
	/* Not enough resources to support VMDq */
	vpool = 0;
	vrxq = 0;
found:
	dev_info->max_vmdq_pools = vpool;
	dev_info->vmdq_queue_num = vrxq;

	dev_info->vmdq_pool_base = 0;
	dev_info->vmdq_queue_base = 0;
}
360
361 /* Configure the device based on the configuration provided */
362 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
363 {
364         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
365         int rc;
366
367         bp->rx_queues = (void *)eth_dev->data->rx_queues;
368         bp->tx_queues = (void *)eth_dev->data->tx_queues;
369
370         /* Inherit new configurations */
371         bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
372         bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
373         bp->rx_cp_nr_rings = bp->rx_nr_rings;
374         bp->tx_cp_nr_rings = bp->tx_nr_rings;
375
376         if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
377                 eth_dev->data->mtu =
378                                 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
379                                 ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
380         rc = bnxt_set_hwrm_link_config(bp, true);
381         return rc;
382 }
383
/*
 * dev_start callback: reset the function in firmware, allocate all
 * driver memory and program the chip.  On any failure every partially
 * acquired resource (including mbufs already posted) is torn down.
 */
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	int rc;

	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
		/* NOTE(review): the real HWRM error code is replaced by -1
		 * here (it is still logged above) -- confirm callers do not
		 * need the original value.
		 */
		rc = -1;
		goto error;
	}

	rc = bnxt_alloc_mem(bp);
	if (rc)
		goto error;

	rc = bnxt_init_nic(bp);
	if (rc)
		goto error;

	return 0;

error:
	bnxt_shutdown_nic(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	bnxt_free_mem(bp);
	return rc;
}
413
414 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
415 {
416         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
417
418         eth_dev->data->dev_link.link_status = 1;
419         bnxt_set_hwrm_link_config(bp, true);
420         return 0;
421 }
422
423 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
424 {
425         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
426
427         eth_dev->data->dev_link.link_status = 0;
428         bnxt_set_hwrm_link_config(bp, false);
429         return 0;
430 }
431
432 static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
433 {
434         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
435
436         bnxt_free_tx_mbufs(bp);
437         bnxt_free_rx_mbufs(bp);
438         bnxt_free_mem(bp);
439         rte_free(eth_dev->data->mac_addrs);
440 }
441
/* Unload the driver, release resources */
/*
 * dev_stop callback: mark the link down if the port was started, then
 * release all firmware-side resources, filters and VNICs.
 */
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;

	if (bp->eth_dev->data->dev_started) {
		/* TBD: STOP HW queues DMA */
		eth_dev->data->dev_link.link_status = 0;
	}
	bnxt_shutdown_nic(bp);
}
453
/*
 * mac_addr_remove callback: delete the MAC filter at @index from every
 * VNIC in each filter-flow pool selected by the address's pool bitmap.
 * Matching filters are cleared in hardware, wiped, and recycled onto
 * the free-filter list.
 */
static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
				    uint32_t index)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	int i;

	/*
	 * Loop through all VNICs from the specified filter flow pools to
	 * remove the corresponding MAC addr filter
	 */
	for (i = 0; i < MAX_FF_POOLS; i++) {
		/* NOTE(review): `1 << i` is int-width; if MAX_FF_POOLS can
		 * exceed 31 this should be `1ULL << i` -- confirm.
		 */
		if (!(pool_mask & (1 << i)))
			continue;

		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			/* Manual walk with a saved successor so removal of
			 * the current node is safe mid-iteration.
			 */
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);
				if (filter->mac_index == index) {
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
					/* Drop the hardware filter, then
					 * recycle the software entry.
					 */
					bnxt_hwrm_clear_filter(bp, filter);
					filter->mac_index = INVALID_MAC_INDEX;
					memset(&filter->l2_addr, 0,
					       ETHER_ADDR_LEN);
					STAILQ_INSERT_TAIL(
							&bp->free_filter_list,
							filter, next);
				}
				filter = temp_filter;
			}
		}
	}
}
491
/*
 * mac_addr_add callback: attach @mac_addr at slot @index to the first
 * VNIC of filter-flow pool @pool.  Allocates a software filter, links
 * it onto the VNIC's filter list and programs it into hardware.
 * Duplicates and allocation failures are logged and dropped -- this op
 * has no way to report an error to the caller.
 */
static void bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
				 struct ether_addr *mac_addr,
				 uint32_t index, uint32_t pool)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
	struct bnxt_filter_info *filter;

	if (!vnic) {
		RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
		return;
	}
	/* Attach requested MAC address to the new l2_filter */
	STAILQ_FOREACH(filter, &vnic->filter, next) {
		/* Refuse to program the same index twice. */
		if (filter->mac_index == index) {
			RTE_LOG(ERR, PMD,
				"MAC addr already existed for pool %d\n", pool);
			return;
		}
	}
	filter = bnxt_alloc_filter(bp);
	if (!filter) {
		RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
		return;
	}
	STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
	filter->mac_index = index;
	memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
	bnxt_hwrm_set_filter(bp, vnic, filter);
}
522
/*
 * link_update callback: query the firmware for the current link state
 * and mirror it into eth_dev->data->dev_link.  When @wait_to_complete
 * is set, polls up to BNXT_LINK_WAIT_CNT times (with
 * BNXT_LINK_WAIT_INTERVAL ms between polls) for link-up.
 * On a firmware query failure the reported speed/duplex default to
 * 100M full-duplex and the HWRM error code is returned.
 */
static int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
			       int wait_to_complete)
{
	int rc = 0;
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_link new;
	unsigned int cnt = BNXT_LINK_WAIT_CNT;

	memset(&new, 0, sizeof(new));
	do {
		/* Retrieve link info from hardware */
		rc = bnxt_get_hwrm_link_config(bp, &new);
		if (rc) {
			new.link_speed = ETH_LINK_SPEED_100M;
			new.link_duplex = ETH_LINK_FULL_DUPLEX;
			RTE_LOG(ERR, PMD,
				"Failed to retrieve link rc = 0x%x!", rc);
			goto out;
		}
		if (!wait_to_complete)
			break;

		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);

	} while (!new.link_status && cnt--);

	/* Timed out or success */
	if (new.link_status) {
		/* Update only if success */
		eth_dev->data->dev_link.link_duplex = new.link_duplex;
		eth_dev->data->dev_link.link_speed = new.link_speed;
	}
	/* Link status is always refreshed, even after a timeout. */
	eth_dev->data->dev_link.link_status = new.link_status;
out:
	return rc;
}
559
560 static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
561 {
562         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
563         struct bnxt_vnic_info *vnic;
564
565         if (bp->vnic_info == NULL)
566                 return;
567
568         vnic = &bp->vnic_info[0];
569
570         vnic->flags |= BNXT_VNIC_INFO_PROMISC;
571         bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
572 }
573
574 static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
575 {
576         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
577         struct bnxt_vnic_info *vnic;
578
579         if (bp->vnic_info == NULL)
580                 return;
581
582         vnic = &bp->vnic_info[0];
583
584         vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
585         bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
586 }
587
588 static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
589 {
590         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
591         struct bnxt_vnic_info *vnic;
592
593         if (bp->vnic_info == NULL)
594                 return;
595
596         vnic = &bp->vnic_info[0];
597
598         vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
599         bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
600 }
601
602 static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
603 {
604         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
605         struct bnxt_vnic_info *vnic;
606
607         if (bp->vnic_info == NULL)
608                 return;
609
610         vnic = &bp->vnic_info[0];
611
612         vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
613         bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
614 }
615
/*
 * reta_update callback: install a new RSS redirection table on every
 * VNIC in every filter-flow pool.  Requires RSS mode to be enabled and
 * @reta_size to match the fixed hardware table size.
 * Returns 0 on success, -EINVAL on a mode or size mismatch.
 */
static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic;
	int i;

	if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
		return -EINVAL;

	if (reta_size != HW_HASH_INDEX_SIZE) {
		RTE_LOG(ERR, PMD, "The configured hash table lookup size "
			"(%d) must equal the size supported by the hardware "
			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
		return -EINVAL;
	}
	/* Update the RSS VNIC(s) */
	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			/* NOTE(review): this copies @reta_size *bytes* from
			 * an array of rte_eth_rss_reta_entry64, but the
			 * hardware table appears to hold 16-bit entries (see
			 * the "u64 to u16" remark in bnxt_reta_query_op) --
			 * confirm the intended layout.
			 */
			memcpy(vnic->rss_table, reta_conf, reta_size);

			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		}
	}
	return 0;
}
644
/*
 * reta_query callback: return the RSS redirection table of the default
 * VNIC (the table is identical across VNICs).  @reta_size must match
 * the fixed hardware table size.
 * Returns 0 on success, -EINVAL if the VNIC/table is missing or the
 * size does not match.
 */
static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];

	/* Retrieve from the default VNIC */
	if (!vnic)
		return -EINVAL;
	if (!vnic->rss_table)
		return -EINVAL;

	if (reta_size != HW_HASH_INDEX_SIZE) {
		RTE_LOG(ERR, PMD, "The configured hash table lookup size "
			"(%d) must equal the size supported by the hardware "
			"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
		return -EINVAL;
	}
	/* EW - need to revisit here copying from u64 to u16 */
	/* NOTE(review): @reta_size is used as a byte count here although
	 * reta_conf is an array of rte_eth_rss_reta_entry64 -- the "EW"
	 * remark above flags the same entry-width concern; confirm.
	 */
	memcpy(reta_conf, vnic->rss_table, reta_size);

	return 0;
}
669
670 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
671                                    struct rte_eth_rss_conf *rss_conf)
672 {
673         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
674         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
675         struct bnxt_vnic_info *vnic;
676         uint16_t hash_type = 0;
677         int i;
678
679         /*
680          * If RSS enablement were different than dev_configure,
681          * then return -EINVAL
682          */
683         if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
684                 if (!rss_conf->rss_hf)
685                         return -EINVAL;
686         } else {
687                 if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
688                         return -EINVAL;
689         }
690         if (rss_conf->rss_hf & ETH_RSS_IPV4)
691                 hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
692         if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
693                 hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
694         if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
695                 hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
696         if (rss_conf->rss_hf & ETH_RSS_IPV6)
697                 hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
698         if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
699                 hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
700         if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
701                 hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
702
703         /* Update the RSS VNIC(s) */
704         for (i = 0; i < MAX_FF_POOLS; i++) {
705                 STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
706                         vnic->hash_type = hash_type;
707
708                         /*
709                          * Use the supplied key if the key length is
710                          * acceptable and the rss_key is not NULL
711                          */
712                         if (rss_conf->rss_key &&
713                             rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
714                                 memcpy(vnic->rss_hash_key, rss_conf->rss_key,
715                                        rss_conf->rss_key_len);
716
717                         bnxt_hwrm_vnic_rss_cfg(bp, vnic);
718                 }
719         }
720         return 0;
721 }
722
723 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
724                                      struct rte_eth_rss_conf *rss_conf)
725 {
726         struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
727         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
728         int len;
729         uint32_t hash_types;
730
731         /* RSS configuration is the same for all VNICs */
732         if (vnic && vnic->rss_hash_key) {
733                 if (rss_conf->rss_key) {
734                         len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
735                               rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
736                         memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
737                 }
738
739                 hash_types = vnic->hash_type;
740                 rss_conf->rss_hf = 0;
741                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
742                         rss_conf->rss_hf |= ETH_RSS_IPV4;
743                         hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
744                 }
745                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
746                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
747                         hash_types &=
748                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
749                 }
750                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
751                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
752                         hash_types &=
753                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
754                 }
755                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
756                         rss_conf->rss_hf |= ETH_RSS_IPV6;
757                         hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
758                 }
759                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
760                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
761                         hash_types &=
762                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
763                 }
764                 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
765                         rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
766                         hash_types &=
767                                 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
768                 }
769                 if (hash_types) {
770                         RTE_LOG(ERR, PMD,
771                                 "Unknwon RSS config from firmware (%08x), RSS disabled",
772                                 vnic->hash_type);
773                         return -ENOTSUP;
774                 }
775         } else {
776                 rss_conf->rss_hf = 0;
777         }
778         return 0;
779 }
780
781 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
782                                struct rte_eth_fc_conf *fc_conf __rte_unused)
783 {
784         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
785         struct rte_eth_link link_info;
786         int rc;
787
788         rc = bnxt_get_hwrm_link_config(bp, &link_info);
789         if (rc)
790                 return rc;
791
792         memset(fc_conf, 0, sizeof(*fc_conf));
793         if (bp->link_info.auto_pause)
794                 fc_conf->autoneg = 1;
795         switch (bp->link_info.pause) {
796         case 0:
797                 fc_conf->mode = RTE_FC_NONE;
798                 break;
799         case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
800                 fc_conf->mode = RTE_FC_TX_PAUSE;
801                 break;
802         case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
803                 fc_conf->mode = RTE_FC_RX_PAUSE;
804                 break;
805         case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
806                         HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
807                 fc_conf->mode = RTE_FC_FULL;
808                 break;
809         }
810         return 0;
811 }
812
813 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
814                                struct rte_eth_fc_conf *fc_conf)
815 {
816         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
817
818         if (BNXT_NPAR_PF(bp))
819                 return 0;
820
821         switch (fc_conf->mode) {
822         case RTE_FC_NONE:
823                 bp->link_info.auto_pause = 0;
824                 bp->link_info.force_pause = 0;
825                 break;
826         case RTE_FC_RX_PAUSE:
827                 if (fc_conf->autoneg) {
828                         bp->link_info.auto_pause =
829                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
830                         bp->link_info.force_pause = 0;
831                 } else {
832                         bp->link_info.auto_pause = 0;
833                         bp->link_info.force_pause =
834                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
835                 }
836                 break;
837         case RTE_FC_TX_PAUSE:
838                 if (fc_conf->autoneg) {
839                         bp->link_info.auto_pause =
840                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
841                         bp->link_info.force_pause = 0;
842                 } else {
843                         bp->link_info.auto_pause = 0;
844                         bp->link_info.force_pause =
845                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
846                 }
847                 break;
848         case RTE_FC_FULL:
849                 if (fc_conf->autoneg) {
850                         bp->link_info.auto_pause =
851                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
852                                         HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
853                         bp->link_info.force_pause = 0;
854                 } else {
855                         bp->link_info.auto_pause = 0;
856                         bp->link_info.force_pause =
857                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
858                                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
859                 }
860                 break;
861         }
862         return bnxt_set_hwrm_link_config(bp, true);
863 }
864
865 /*
866  * Initialization
867  */
868
869 static struct eth_dev_ops bnxt_dev_ops = {
870         .dev_infos_get = bnxt_dev_info_get_op,
871         .dev_close = bnxt_dev_close_op,
872         .dev_configure = bnxt_dev_configure_op,
873         .dev_start = bnxt_dev_start_op,
874         .dev_stop = bnxt_dev_stop_op,
875         .dev_set_link_up = bnxt_dev_set_link_up_op,
876         .dev_set_link_down = bnxt_dev_set_link_down_op,
877         .stats_get = bnxt_stats_get_op,
878         .stats_reset = bnxt_stats_reset_op,
879         .rx_queue_setup = bnxt_rx_queue_setup_op,
880         .rx_queue_release = bnxt_rx_queue_release_op,
881         .tx_queue_setup = bnxt_tx_queue_setup_op,
882         .tx_queue_release = bnxt_tx_queue_release_op,
883         .reta_update = bnxt_reta_update_op,
884         .reta_query = bnxt_reta_query_op,
885         .rss_hash_update = bnxt_rss_hash_update_op,
886         .rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
887         .link_update = bnxt_link_update_op,
888         .promiscuous_enable = bnxt_promiscuous_enable_op,
889         .promiscuous_disable = bnxt_promiscuous_disable_op,
890         .allmulticast_enable = bnxt_allmulticast_enable_op,
891         .allmulticast_disable = bnxt_allmulticast_disable_op,
892         .mac_addr_add = bnxt_mac_addr_add_op,
893         .mac_addr_remove = bnxt_mac_addr_remove_op,
894         .flow_ctrl_get = bnxt_flow_ctrl_get_op,
895         .flow_ctrl_set = bnxt_flow_ctrl_set_op,
896 };
897
898 static bool bnxt_vf_pciid(uint16_t id)
899 {
900         if (id == BROADCOM_DEV_ID_57304_VF ||
901             id == BROADCOM_DEV_ID_57406_VF)
902                 return true;
903         return false;
904 }
905
906 static int bnxt_init_board(struct rte_eth_dev *eth_dev)
907 {
908         int rc;
909         struct bnxt *bp = eth_dev->data->dev_private;
910
911         /* enable device (incl. PCI PM wakeup), and bus-mastering */
912         if (!eth_dev->pci_dev->mem_resource[0].addr) {
913                 RTE_LOG(ERR, PMD,
914                         "Cannot find PCI device base address, aborting\n");
915                 rc = -ENODEV;
916                 goto init_err_disable;
917         }
918
919         bp->eth_dev = eth_dev;
920         bp->pdev = eth_dev->pci_dev;
921
922         bp->bar0 = (void *)eth_dev->pci_dev->mem_resource[0].addr;
923         if (!bp->bar0) {
924                 RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n");
925                 rc = -ENOMEM;
926                 goto init_err_release;
927         }
928         return 0;
929
930 init_err_release:
931         if (bp->bar0)
932                 bp->bar0 = NULL;
933
934 init_err_disable:
935
936         return rc;
937 }
938
939 static int
940 bnxt_dev_init(struct rte_eth_dev *eth_dev)
941 {
942         static int version_printed;
943         struct bnxt *bp;
944         int rc;
945
946         if (version_printed++ == 0)
947                 RTE_LOG(INFO, PMD, "%s", bnxt_version);
948
949         rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev);
950         bp = eth_dev->data->dev_private;
951
952         if (bnxt_vf_pciid(eth_dev->pci_dev->id.device_id))
953                 bp->flags |= BNXT_FLAG_VF;
954
955         rc = bnxt_init_board(eth_dev);
956         if (rc) {
957                 RTE_LOG(ERR, PMD,
958                         "Board initialization failed rc: %x\n", rc);
959                 goto error;
960         }
961         eth_dev->dev_ops = &bnxt_dev_ops;
962         eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
963         eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
964
965         rc = bnxt_alloc_hwrm_resources(bp);
966         if (rc) {
967                 RTE_LOG(ERR, PMD,
968                         "hwrm resource allocation failure rc: %x\n", rc);
969                 goto error_free;
970         }
971         rc = bnxt_hwrm_ver_get(bp);
972         if (rc)
973                 goto error_free;
974         bnxt_hwrm_queue_qportcfg(bp);
975
976         bnxt_hwrm_func_qcfg(bp);
977
978         /* Get the MAX capabilities for this function */
979         rc = bnxt_hwrm_func_qcaps(bp);
980         if (rc) {
981                 RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
982                 goto error_free;
983         }
984         eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
985                                         ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR, 0);
986         if (eth_dev->data->mac_addrs == NULL) {
987                 RTE_LOG(ERR, PMD,
988                         "Failed to alloc %u bytes needed to store MAC addr tbl",
989                         ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR);
990                 rc = -ENOMEM;
991                 goto error_free;
992         }
993         /* Copy the permanent MAC from the qcap response address now. */
994         if (BNXT_PF(bp))
995                 memcpy(bp->mac_addr, bp->pf.mac_addr, sizeof(bp->mac_addr));
996         else
997                 memcpy(bp->mac_addr, bp->vf.mac_addr, sizeof(bp->mac_addr));
998         memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
999         bp->grp_info = rte_zmalloc("bnxt_grp_info",
1000                                 sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
1001         if (!bp->grp_info) {
1002                 RTE_LOG(ERR, PMD,
1003                         "Failed to alloc %zu bytes needed to store group info table\n",
1004                         sizeof(*bp->grp_info) * bp->max_ring_grps);
1005                 rc = -ENOMEM;
1006                 goto error_free;
1007         }
1008
1009         rc = bnxt_hwrm_func_driver_register(bp, 0,
1010                                             bp->pf.vf_req_fwd);
1011         if (rc) {
1012                 RTE_LOG(ERR, PMD,
1013                         "Failed to register driver");
1014                 rc = -EBUSY;
1015                 goto error_free;
1016         }
1017
1018         RTE_LOG(INFO, PMD,
1019                 DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
1020                 eth_dev->pci_dev->mem_resource[0].phys_addr,
1021                 eth_dev->pci_dev->mem_resource[0].addr);
1022
1023         return 0;
1024
1025 error_free:
1026         eth_dev->driver->eth_dev_uninit(eth_dev);
1027 error:
1028         return rc;
1029 }
1030
1031 static int
1032 bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
1033         struct bnxt *bp = eth_dev->data->dev_private;
1034         int rc;
1035
1036         if (eth_dev->data->mac_addrs)
1037                 rte_free(eth_dev->data->mac_addrs);
1038         if (bp->grp_info)
1039                 rte_free(bp->grp_info);
1040         rc = bnxt_hwrm_func_driver_unregister(bp, 0);
1041         bnxt_free_hwrm_resources(bp);
1042         return rc;
1043 }
1044
/*
 * PMD registration glue: ties the PCI ID table and the generic ethdev
 * PCI probe/remove helpers to the bnxt init/uninit hooks, and sizes
 * the per-port private area to hold a struct bnxt.
 */
static struct eth_driver bnxt_rte_pmd = {
	.pci_drv = {
		    .id_table = bnxt_pci_id_map,
		    .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
		    .probe = rte_eth_dev_pci_probe,
		    .remove = rte_eth_dev_pci_remove
		    },
	.eth_dev_init = bnxt_dev_init,
	.eth_dev_uninit = bnxt_dev_uninit,
	.dev_private_size = sizeof(struct bnxt),
};

/* Register the driver with the EAL under the name "net_bnxt". */
DRIVER_REGISTER_PCI(net_bnxt, bnxt_rte_pmd.pci_drv);
DRIVER_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);