drivers/net/bnxt/bnxt_hwrm.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

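/*
 * Maximum number of response polls per command; the send loop below waits
 * 600us between polls, so this bounds a command at roughly 1.2 seconds.
 */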
#define HWRM_CMD_TIMEOUT                2000

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

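/*
 * Map a size to the log2 of the smallest supported page size that can hold
 * it, e.g. page_getenum(3000) == 12 and, correspondingly,
 * page_roundup(3000) == 4096.
 */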
static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * ChiMP firmware fails the command.
 */

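/*
 * Request/response protocol with the firmware: the request is written
 * 32 bits at a time into BAR0, the remainder of the channel is zeroed,
 * the doorbell at BAR0 + 0x100 is rung, and the DMA'd response is polled
 * until its final byte carries HWRM_RESP_VALID_KEY.
 */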
static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
                                        uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < bp->max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
                        req->req_type);
                return -1;
        }
        return 0;
}

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
{
        int rc;

        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}

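/*
 * HWRM_PREP fills the common request header: it zeroes the shared response
 * buffer, stamps the request type and a fresh sequence number, and points
 * the firmware at the DMA address of the response buffer.
 */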
#define HWRM_PREP(req, type, cr, resp) \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(cr); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)

#define HWRM_CHECK_RESULT \
        { \
                if (rc) { \
                        RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
                                __func__, rc); \
                        return rc; \
                } \
                if (resp->error_code) { \
                        rc = rte_le_to_cpu_16(resp->error_code); \
                        if (resp->resp_len >= 16) { \
                                struct hwrm_err_output *tmp_hwrm_err_op = \
                                                        (void *)resp; \
                                RTE_LOG(ERR, PMD, \
                                        "%s error %d:%d:%08x:%04x\n", \
                                        __func__, \
                                        rc, tmp_hwrm_err_op->cmd_err, \
                                        rte_le_to_cpu_32(\
                                                tmp_hwrm_err_op->opaque_0), \
                                        rte_le_to_cpu_16(\
                                                tmp_hwrm_err_op->opaque_1)); \
                        } else { \
                                RTE_LOG(ERR, PMD, \
                                        "%s error %d\n", __func__, rc); \
                        } \
                        return rc; \
                } \
        }

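/*
 * Illustrative sketch of the command pattern used by every wrapper below
 * (not an additional API):
 *
 *      struct hwrm_func_reset_input req = {.req_type = 0 };
 *      struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *      HWRM_PREP(req, FUNC_RESET, -1, resp);
 *      req.enables = rte_cpu_to_le_32(0);
 *      rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *      HWRM_CHECK_RESULT;
 */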
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME add multicast flag, when multicast adding options is supported
         * by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
                                    mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}


int bnxt_hwrm_clear_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = -1;

        return 0;
}

int bnxt_hwrm_set_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        int i;

        HWRM_PREP(req, FUNC_QCAPS, -1, resp);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        RTE_LOG(ERR, PMD,
                                        "Fail to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp))
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET, -1, resp);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));
        }

        /* Forward all async events; the memset must come first so it does
         * not overwrite the explicit bit below.
         */
        memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
        req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];

        HWRM_PREP(req, VER_GET, -1, resp);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        /*
         * Hold the lock since we may be adjusting the response pointers.
         * Check the result inline rather than via HWRM_CHECK_RESULT, which
         * would return while hwrm_lock is still held.
         */
        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));

        if (rc) {
                RTE_LOG(ERR, PMD, "%s failed rc:%d\n", __func__, rc);
                goto error;
        }
        if (resp->error_code) {
                rc = rte_le_to_cpu_16(resp->error_code);
                RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc);
                goto error;
        }

        RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj, resp->hwrm_intf_min,
                resp->hwrm_intf_upd,
                resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
        bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
                        (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
        RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

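        /*
         * Pack (major, minor, update) into a single integer so interface
         * versions can be compared with ordinary integer comparisons.
         */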
        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj << 16;
        fw_version |= resp->hwrm_intf_min << 8;
        fw_version |= resp->hwrm_intf_upd;

        if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
                RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is newer than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "The driver may be missing features.\n");
                } else {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is older than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                RTE_LOG(ERR, PMD, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = resp->max_req_win_len;
        max_resp_len = resp->max_resp_len;
        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        RTE_LOG(ERR, PMD,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

error:
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

        if (conf->link_up) {
                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        req.auto_mode |= conf->auto_mode;
                        enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                        req.auto_link_speed_mask = conf->auto_link_speed_mask;
                        enables |=
                           HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                        req.auto_link_speed = bp->link_info.auto_link_speed;
                        enables |=
                                HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
                }
                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /*
                 * Enable AUTO_PAUSE only when auto pause is requested and no
                 * pause is forced; otherwise enable FORCE_PAUSE.
                 */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                RTE_LOG(INFO, PMD, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        link_info->phy_link_status = resp->link;
        if (link_info->phy_link_status !=
            HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK) {
                link_info->link_up = 1;
                link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        } else {
                link_info->link_up = 0;
                link_info->link_speed = 0;
        }
        link_info->duplex = resp->duplex;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

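/*
 * Token pasting turns GET_QUEUE_INFO(0) into reads of resp->queue_id0 and
 * resp->queue_id0_service_profile, and so on for queues 1-7.
 */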
#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id)
{
        int rc = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC, -1, resp);

        req.enables = rte_cpu_to_le_32(0);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = bp->cos_queue[0].id;
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id =
                    rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
                req.length = rte_cpu_to_le_32(ring->ring_size);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                req.length = rte_cpu_to_le_32(ring->ring_size);
                break;
        default:
                RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                return -1;
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE, -1, resp);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE, -1, resp);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        /* Bail out before HWRM_PREP so the sequence number is not consumed */
        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
                             struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);

        req.update_period_ms = rte_cpu_to_le_32(1000);

        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
        bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

        return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
                            struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE, -1, resp);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
        bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

        return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
                if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
                        RTE_LOG(ERR, PMD,
                                "Not enough ring groups avail:%x req:%x\n", j,
                                (vnic->end_grp_id - vnic->start_grp_id) + 1);
                        break;
                }
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        }
        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                ETHER_CRC_LEN + VLAN_TAG_SIZE;
        HWRM_PREP(req, VNIC_ALLOC, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
                                        struct bnxt_vnic_info *vnic,
                                        struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        pmode->flags = rte_le_to_cpu_32(resp->flags);
        /* dflt_vnic bit doesn't exist in the _cfg command */
        pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
        pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
        pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
        pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
                                       struct bnxt_vnic_info *vnic,
                                       struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
        req.flags = rte_cpu_to_le_32(pmode->flags);
        req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
        req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
        req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
        req.enables = rte_cpu_to_le_32(
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
        );

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
        struct bnxt_plcmodes_cfg pmodes;

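        /*
         * Save the current placement-mode settings so they can be restored
         * after VNIC_CFG, which may otherwise disturb them.
         */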
        rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
        if (rc)
                return rc;

        HWRM_PREP(req, VNIC_CFG, -1, resp);

        /* Only RSS support for now TBD: COS & LB */
        req.enables =
            rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
                             HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
        if (vnic->lb_rule != 0xffff)
                ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
        if (vnic->cos_rule != 0xffff)
                ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
        if (vnic->rss_rule != 0xffff)
                ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
        req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
        req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
        req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
        req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
        req.mru = rte_cpu_to_le_16(vnic->mru);
        if (vnic->func_default)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
        if (vnic->vlan_strip)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
        if (vnic->bd_stall)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
        if (vnic->roce_dual)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
        if (vnic->roce_only)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
        if (vnic->rss_dflt_cr)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

        return rc;
}

int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
                int16_t fw_vf_id)
{
        int rc = 0;
        struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_QCFG, -1, resp);

        req.enables =
                rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.vf_id = rte_cpu_to_le_16(fw_vf_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
        vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
        vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
        vnic->mru = rte_le_to_cpu_16(resp->mru);
        vnic->func_default = rte_le_to_cpu_32(
                        resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
        vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
        vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
        vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
        vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
        vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

        return rc;
}

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);

        return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

        req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->rss_rule = INVALID_HW_RING_ID;

        return rc;
}

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_free_input req = {.req_type = 0 };
        struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, VNIC_FREE, -1, resp);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
                           struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

        req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

        req.ring_grp_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_table_dma_addr);
        req.hash_key_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
        req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
{
        struct hwrm_func_cfg_input req = {0};
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
        req.enables = rte_cpu_to_le_32(
                        HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);

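        /*
         * Note: the request fields above survive HWRM_PREP, which only
         * fills the common header and clears the response buffer.
         */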
        HWRM_PREP(req, FUNC_CFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT;

        bp->pf.vf_info[vf].random_mac = false;

        return rc;
}

/*
 * HWRM utility functions
 */

int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_clear(bp, cpr);
                if (rc)
                        return rc;
        }
        return 0;
}

int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        int rc;
        unsigned int i;
        struct bnxt_cp_ring_info *cpr;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
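                /*
                 * Group 0 is reserved for the default completion ring, so
                 * per-queue contexts are indexed from 1.
                 */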
                unsigned int idx = i + 1;

                if (i >= bp->rx_cp_nr_rings)
                        cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
                else
                        cpr = bp->rx_queues[i]->cp_ring;
                if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
                        rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
                        if (rc)
                                return rc;
                }
        }
        return 0;
}

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;
                unsigned int idx = i + 1;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
        uint16_t i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
                        RTE_LOG(ERR, PMD,
                                "Attempt to free invalid ring group %d\n",
                                idx);
                        continue;
                }

                rc = bnxt_hwrm_ring_grp_free(bp, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

static void bnxt_free_cp_ring(struct bnxt *bp,
                              struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

        bnxt_hwrm_ring_free(bp, cp_ring,
                        HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
        cp_ring->fw_ring_id = INVALID_HW_RING_ID;
        bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
        memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
                        sizeof(*cpr->cp_desc_ring));
        cpr->cp_raw_cons = 0;
}

int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq = bp->tx_queues[i];
                struct bnxt_tx_ring_info *txr = txq->tx_ring;
                struct bnxt_ring *ring = txr->tx_ring_struct;
                struct bnxt_cp_ring_info *cpr = txq->cp_ring;
                unsigned int idx = bp->rx_cp_nr_rings + i + 1;

                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
                                        HWRM_RING_FREE_INPUT_RING_TYPE_TX);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        memset(txr->tx_desc_ring, 0,
                                        txr->tx_ring_struct->ring_size *
                                        sizeof(*txr->tx_desc_ring));
                        memset(txr->tx_buf_ring, 0,
                                        txr->tx_ring_struct->ring_size *
                                        sizeof(*txr->tx_buf_ring));
                        txr->tx_prod = 0;
                        txr->tx_cons = 0;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr, idx);
        }

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                struct bnxt_rx_queue *rxq = bp->rx_queues[i];
                struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
                struct bnxt_ring *ring = rxr->rx_ring_struct;
                struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
                unsigned int idx = i + 1;

                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
                                        HWRM_RING_FREE_INPUT_RING_TYPE_RX);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
                        memset(rxr->rx_desc_ring, 0,
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_desc_ring));
                        memset(rxr->rx_buf_ring, 0,
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_buf_ring));
                        rxr->rx_prod = 0;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr, idx);
        }

        /* Default completion ring */
        {
                struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr, 0);
        }

        return rc;
}

int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
        uint16_t i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
                    bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
                        continue;

                rc = bnxt_hwrm_ring_grp_alloc(bp, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

void bnxt_free_hwrm_resources(struct bnxt *bp)
{
        /* Release the HWRM response buffer */
        rte_free(bp->hwrm_cmd_resp_addr);
        bp->hwrm_cmd_resp_addr = NULL;
        bp->hwrm_cmd_resp_dma_addr = 0;
}

int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
        struct rte_pci_device *pdev = bp->pdev;
        char type[RTE_MEMZONE_NAMESIZE];

        sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        bp->max_req_len = HWRM_MAX_REQ_LEN;
        bp->max_resp_len = HWRM_MAX_RESP_LEN;
        bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
        if (bp->hwrm_cmd_resp_addr == NULL)
                return -ENOMEM;
        rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
        bp->hwrm_cmd_resp_dma_addr =
                rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
        if (bp->hwrm_cmd_resp_dma_addr == 0) {
                RTE_LOG(ERR, PMD,
                        "unable to map response address to physical memory\n");
                return -ENOMEM;
        }
        rte_spinlock_init(&bp->hwrm_lock);

        return 0;
}

int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter;
        int rc = 0;

        STAILQ_FOREACH(filter, &vnic->filter, next) {
                rc = bnxt_hwrm_clear_filter(bp, filter);
                if (rc)
                        break;
        }
        return rc;
}

int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter;
        int rc = 0;

        STAILQ_FOREACH(filter, &vnic->filter, next) {
                rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
                if (rc)
                        break;
        }
        return rc;
}

1434 void bnxt_free_tunnel_ports(struct bnxt *bp)
1435 {
1436         if (bp->vxlan_port_cnt)
1437                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1438                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1439         bp->vxlan_port = 0;
1440         if (bp->geneve_port_cnt)
1441                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1442                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1443         bp->geneve_port = 0;
1444 }
1445
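/*
 * Teardown order matters here: filters must be cleared before their VNIC is
 * freed, and rings must be freed before the ring groups and stat contexts
 * that reference them.  The sequence below follows that dependency chain.
 */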
void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
        struct bnxt_vnic_info *vnic;
        unsigned int i;

        if (bp->vnic_info == NULL)
                return;

        vnic = &bp->vnic_info[0];
        if (BNXT_PF(bp))
                bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);

        /* VNIC resources */
        for (i = 0; i < bp->nr_vnics; i++) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                bnxt_clear_hwrm_vnic_filters(bp, vnic);

                bnxt_hwrm_vnic_ctx_free(bp, vnic);
                bnxt_hwrm_vnic_free(bp, vnic);
        }
        /* Ring resources */
        bnxt_free_all_hwrm_rings(bp);
        bnxt_free_all_hwrm_ring_grps(bp);
        bnxt_free_all_hwrm_stat_ctxs(bp);
        bnxt_free_tunnel_ports(bp);
}

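/*
 * In rte_ethdev, ETH_LINK_SPEED_AUTONEG is 0 and ETH_LINK_SPEED_FIXED is
 * bit 0 of link_speeds, so "(conf & FIXED) == AUTONEG" below is simply a
 * test that the fixed-speed bit is clear.  For example, a configuration of
 * ETH_LINK_SPEED_10G (autoneg restricted to 10G) takes the early return,
 * while ETH_LINK_SPEED_FIXED | ETH_LINK_SPEED_100M_HD reaches the switch
 * and selects half duplex.
 */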
static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
        uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

        if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
                return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

        switch (conf_link_speed) {
        case ETH_LINK_SPEED_10M_HD:
        case ETH_LINK_SPEED_100M_HD:
                return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
        }
        return hw_link_duplex;
}

static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
        uint16_t eth_link_speed = 0;

        if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
                return ETH_LINK_SPEED_AUTONEG;

        switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
        case ETH_LINK_SPEED_100M:
        case ETH_LINK_SPEED_100M_HD:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
                break;
        case ETH_LINK_SPEED_1G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
                break;
        case ETH_LINK_SPEED_2_5G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
                break;
        case ETH_LINK_SPEED_10G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
                break;
        case ETH_LINK_SPEED_20G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
                break;
        case ETH_LINK_SPEED_25G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
                break;
        case ETH_LINK_SPEED_40G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
                break;
        case ETH_LINK_SPEED_50G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
                break;
        default:
                RTE_LOG(ERR, PMD,
                        "Unsupported link speed %u; default to AUTO\n",
                        conf_link_speed);
                break;
        }
        return eth_link_speed;
}

#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
                ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
                ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
                ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)

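/*
 * Sanity-check the configured link_speeds bitmap.  For a fixed-speed
 * configuration exactly one speed bit may be set; "one_speed &
 * (one_speed - 1)" is the usual power-of-two test.  E.g. 0x100 & 0x0ff == 0
 * passes, while 0x110 & 0x10f == 0x100 != 0 catches two speeds requested at
 * once.
 */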
static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
{
        uint32_t one_speed;

        if (link_speed == ETH_LINK_SPEED_AUTONEG)
                return 0;

        if (link_speed & ETH_LINK_SPEED_FIXED) {
                one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

                if (one_speed & (one_speed - 1)) {
                        RTE_LOG(ERR, PMD,
                                "Invalid advertised speeds (%u) for port %u\n",
                                link_speed, port_id);
                        return -EINVAL;
                }
                if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
                        RTE_LOG(ERR, PMD,
                                "Unsupported advertised speed (%u) for port %u\n",
                                link_speed, port_id);
                        return -EINVAL;
                }
        } else {
                if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
                        RTE_LOG(ERR, PMD,
                                "Unsupported advertised speeds (%u) for port %u\n",
                                link_speed, port_id);
                        return -EINVAL;
                }
        }
        return 0;
}

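/*
 * Translate the DPDK link_speeds bitmap into the HWRM auto_link_speed_mask.
 * The mask used here carries no duplex information, which is why both
 * ETH_LINK_SPEED_100M and ETH_LINK_SPEED_100M_HD set the same 100MB bit;
 * duplex is selected separately via bnxt_parse_eth_link_duplex().
 */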
static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
{
        uint16_t ret = 0;

        if (link_speed == ETH_LINK_SPEED_AUTONEG)
                link_speed = BNXT_SUPPORTED_SPEEDS;

        if (link_speed & ETH_LINK_SPEED_100M)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
        if (link_speed & ETH_LINK_SPEED_100M_HD)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
        if (link_speed & ETH_LINK_SPEED_1G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
        if (link_speed & ETH_LINK_SPEED_2_5G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
        if (link_speed & ETH_LINK_SPEED_10G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
        if (link_speed & ETH_LINK_SPEED_20G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
        if (link_speed & ETH_LINK_SPEED_25G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
        if (link_speed & ETH_LINK_SPEED_40G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
        if (link_speed & ETH_LINK_SPEED_50G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
        return ret;
}

static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
        uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

        switch (hw_link_speed) {
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
                eth_link_speed = ETH_SPEED_NUM_100M;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
                eth_link_speed = ETH_SPEED_NUM_1G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
                eth_link_speed = ETH_SPEED_NUM_2_5G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
                eth_link_speed = ETH_SPEED_NUM_10G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
                eth_link_speed = ETH_SPEED_NUM_20G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
                eth_link_speed = ETH_SPEED_NUM_25G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
                eth_link_speed = ETH_SPEED_NUM_40G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
                eth_link_speed = ETH_SPEED_NUM_50G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
        default:
                RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
                        hw_link_speed);
                break;
        }
        return eth_link_speed;
}

static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
        uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

        switch (hw_link_duplex) {
        case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
        case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
                eth_link_duplex = ETH_LINK_FULL_DUPLEX;
                break;
        case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
                eth_link_duplex = ETH_LINK_HALF_DUPLEX;
                break;
        default:
                RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
                        hw_link_duplex);
                break;
        }
        return eth_link_duplex;
}

int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
        int rc = 0;
        struct bnxt_link_info *link_info = &bp->link_info;

        rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
        if (rc) {
                RTE_LOG(ERR, PMD,
                        "Get link config failed with rc %d\n", rc);
                goto exit;
        }
        if (link_info->link_up)
                link->link_speed =
                        bnxt_parse_hw_link_speed(link_info->link_speed);
        else
                /* link_speed holds a numeric ETH_SPEED_NUM_* value, not an
                 * ETH_LINK_SPEED_* flag; report no speed while link is down.
                 */
                link->link_speed = ETH_SPEED_NUM_NONE;
        link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
        link->link_status = link_info->link_up;
        link->link_autoneg = link_info->auto_mode ==
                HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
                ETH_LINK_FIXED : ETH_LINK_AUTONEG;
exit:
        return rc;
}

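/*
 * Apply the configured link settings.  Two paths, per the code below: if
 * bnxt_parse_eth_link_speed() yields 0 the port is put into autonegotiation
 * restricted by auto_link_speed_mask; otherwise a single speed is forced.
 * E.g. link_speeds == ETH_LINK_SPEED_AUTONEG advertises every speed in
 * BNXT_SUPPORTED_SPEEDS, while ETH_LINK_SPEED_40G | ETH_LINK_SPEED_FIXED
 * forces 40G.
 */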
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
        int rc = 0;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_link_info link_req;
        uint16_t speed;

        if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
                return 0;

        rc = bnxt_valid_link_speed(dev_conf->link_speeds,
                        bp->eth_dev->data->port_id);
        if (rc)
                goto error;

        memset(&link_req, 0, sizeof(link_req));
        link_req.link_up = link_up;
        if (!link_up)
                goto port_phy_cfg;

        speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
        link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
        if (speed == 0) {
                link_req.phy_flags |=
                                HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
                link_req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                link_req.auto_link_speed_mask =
                        bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
        } else {
                link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
                link_req.link_speed = speed;
                RTE_LOG(INFO, PMD, "Set Link Speed 0x%x\n", speed);
        }
        link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
        link_req.auto_pause = bp->link_info.auto_pause;
        link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
        rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
        if (rc) {
                RTE_LOG(ERR, PMD,
                        "Set link config failed with rc %d\n", rc);
        }

        rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
error:
        return rc;
}

/* JIRA 22088 */
int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
        struct hwrm_func_qcfg_input req = {0};
        struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc = 0;

        HWRM_PREP(req, FUNC_QCFG, -1, resp);
        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        /* Hard-coded 12-bit (0xfff) VLAN ID mask */
        bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;

        switch (resp->port_partition_type) {
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
                bp->port_partition_type = resp->port_partition_type;
                break;
        default:
                bp->port_partition_type = 0;
                break;
        }

        return rc;
}

/*
 * Fallback used when a VF's FUNC_QCAPS query fails: synthesize a "qcaps"
 * response from the configuration we just requested, so the resource
 * accounting in reserve_resources_from_vf() still has sane numbers to
 * subtract.
 */
static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
                                   struct hwrm_func_qcaps_output *qcaps)
{
        qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
        memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
               sizeof(qcaps->mac_address));
        qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
        qcaps->max_rx_rings = fcfg->num_rx_rings;
        qcaps->max_tx_rings = fcfg->num_tx_rings;
        qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
        qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
        qcaps->max_vfs = 0;
        qcaps->first_vf_id = 0;
        qcaps->max_vnics = fcfg->num_vnics;
        qcaps->max_decap_records = 0;
        qcaps->max_encap_records = 0;
        qcaps->max_tx_wm_flows = 0;
        qcaps->max_tx_em_flows = 0;
        qcaps->max_rx_wm_flows = 0;
        qcaps->max_rx_em_flows = 0;
        qcaps->max_flow_id = 0;
        qcaps->max_mcast_filters = fcfg->num_mcast_filters;
        qcaps->max_sp_tx_rings = 0;
        qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
}

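/*
 * The MTU/MRU programming below adds the L2 overhead to the payload MTU.
 * As a worked example, with the default MTU of 1500:
 *
 *      1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) + VLAN_TAG_SIZE (4)
 *           = 1522 bytes on the wire
 */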
static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
{
        struct hwrm_func_cfg_input req = {0};
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
                        HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
        req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
        req.mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                   ETHER_CRC_LEN + VLAN_TAG_SIZE);
        req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                   ETHER_CRC_LEN + VLAN_TAG_SIZE);
        req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
        req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
        req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
        req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
        req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
        req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
        req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
        req.fid = rte_cpu_to_le_16(0xffff);

        HWRM_PREP(req, FUNC_CFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT;

        return rc;
}

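/*
 * Resources are split evenly between the PF and its VFs: each of the
 * num_vfs + 1 functions gets an equal share.  For instance, assuming
 * max_tx_rings == 64 and num_vfs == 3, every function is offered
 * 64 / (3 + 1) == 16 TX rings (integer division; any remainder stays
 * unassigned).
 */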
static void populate_vf_func_cfg_req(struct bnxt *bp,
                                     struct hwrm_func_cfg_input *req,
                                     int num_vfs)
{
        req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
                        HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
                        HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);

        req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
        req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
        req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
                                                (num_vfs + 1));
        req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
        req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
                                               (num_vfs + 1));
        req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
        req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
        req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        req->num_vnics = rte_cpu_to_le_16(1);
        req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
                                                 (num_vfs + 1));
}

static void add_random_mac_if_needed(struct bnxt *bp,
                                     struct hwrm_func_cfg_input *cfg_req,
                                     int vf)
{
        struct ether_addr mac;

        if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
                return;

        /* Assign a random MAC only if the VF's default MAC is all zeroes */
        if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
                cfg_req->enables |=
                rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
                eth_random_addr(cfg_req->dflt_mac_addr);
                bp->pf.vf_info[vf].random_mac = true;
        } else {
                memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
        }
}

static void reserve_resources_from_vf(struct bnxt *bp,
                                      struct hwrm_func_cfg_input *cfg_req,
                                      int vf)
{
        struct hwrm_func_qcaps_input req = {0};
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        /* Get the actual allocated values now */
        HWRM_PREP(req, FUNC_QCAPS, -1, resp);
        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc) {
                RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
                copy_func_cfg_to_qcaps(cfg_req, resp);
        } else if (resp->error_code) {
                rc = rte_le_to_cpu_16(resp->error_code);
                RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
                copy_func_cfg_to_qcaps(cfg_req, resp);
        }

        bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
        bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
        /*
         * TODO: While not supporting VMDq with VFs, max_vnics is always
         * forced to 1 in this case, so it is not subtracted here:
         * bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
         */
        bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
}

static int update_pf_resource_max(struct bnxt *bp)
{
        struct hwrm_func_qcfg_input req = {0};
        struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        /* And copy the allocated numbers into the pf struct */
        HWRM_PREP(req, FUNC_QCFG, -1, resp);
        req.fid = rte_cpu_to_le_16(0xffff);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT;

        /* Only TX ring value reflects actual allocation? TODO */
        bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
        bp->pf.evb_mode = resp->evb_mode;

        return rc;
}

int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
{
        int rc;

        if (!BNXT_PF(bp)) {
                RTE_LOG(ERR, PMD,
                        "Attempt to allocate PF resources on a VF!\n");
                return -1;
        }

        rc = bnxt_hwrm_func_qcaps(bp);
        if (rc)
                return rc;

        bp->pf.func_cfg_flags &=
                ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
                  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
        bp->pf.func_cfg_flags |=
                HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
        rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
        return rc;
}

int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
{
        struct hwrm_func_cfg_input req = {0};
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        int i;
        size_t sz;
        int rc = 0;
        size_t req_buf_sz;

        if (!BNXT_PF(bp)) {
                RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
                return -1;
        }

        rc = bnxt_hwrm_func_qcaps(bp);

        if (rc)
                return rc;

        bp->pf.active_vfs = num_vfs;

        /*
         * First, configure the PF to only use one TX ring.  This ensures that
         * there are enough rings for all VFs.
         *
         * If we don't do this, when we call func_alloc() later, we will lock
         * extra rings to the PF that won't be available during func_cfg() of
         * the VFs.
         *
         * This has been fixed with firmware versions above 20.6.54
         */
        bp->pf.func_cfg_flags &=
                ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
                  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
        bp->pf.func_cfg_flags |=
                HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
        rc = bnxt_hwrm_pf_func_cfg(bp, 1);
        if (rc)
                return rc;

        /*
         * Now, create and register a buffer to hold forwarded VF requests
         */
        req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
        bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
                page_roundup(req_buf_sz));
        if (bp->pf.vf_req_buf == NULL) {
                rc = -ENOMEM;
                goto error_free;
        }
        for (sz = 0; sz < req_buf_sz; sz += getpagesize())
                rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
        for (i = 0; i < num_vfs; i++)
                bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
                                        (i * HWRM_MAX_REQ_LEN);

        rc = bnxt_hwrm_func_buf_rgtr(bp);
        if (rc)
                goto error_free;

        populate_vf_func_cfg_req(bp, &req, num_vfs);

        bp->pf.active_vfs = 0;
        for (i = 0; i < num_vfs; i++) {
                add_random_mac_if_needed(bp, &req, i);

                HWRM_PREP(req, FUNC_CFG, -1, resp);
                req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
                req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
                rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

                /* Clear enable flag for next pass */
                req.enables &= ~rte_cpu_to_le_32(
                                HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);

                if (rc || resp->error_code) {
                        RTE_LOG(ERR, PMD,
                                "Failed to initialize VF %d\n", i);
                        RTE_LOG(ERR, PMD,
                                "Not all VFs available. (%d, %d)\n",
                                rc, resp->error_code);
                        break;
                }

                reserve_resources_from_vf(bp, &req, i);
                bp->pf.active_vfs++;
        }

        /*
         * Now configure the PF to use "the rest" of the resources.
         * We're using STD_TX_RING_MODE here though, which will limit the TX
         * rings.  This will allow QoS to function properly.  Not setting this
         * will cause PF rings to break bandwidth settings.
         */
        rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
        if (rc)
                goto error_free;

        rc = update_pf_resource_max(bp);
        if (rc)
                goto error_free;

        return rc;

error_free:
        bnxt_hwrm_func_buf_unrgtr(bp);
        return rc;
}

int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
                                uint8_t tunnel_type)
{
        struct hwrm_tunnel_dst_port_alloc_input req = {0};
        struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        int rc = 0;

        HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, -1, resp);
        req.tunnel_type = tunnel_type;
        req.tunnel_dst_port_val = port;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT;

        switch (tunnel_type) {
        case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
                bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
                bp->vxlan_port = port;
                break;
        case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
                bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
                bp->geneve_port = port;
                break;
        default:
                break;
        }
        return rc;
}

int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
                                uint8_t tunnel_type)
{
        struct hwrm_tunnel_dst_port_free_input req = {0};
        struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
        int rc = 0;

        HWRM_PREP(req, TUNNEL_DST_PORT_FREE, -1, resp);
        req.tunnel_type = tunnel_type;
        req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT;

        return rc;
}

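/*
 * Register the VF request-forwarding buffer with the firmware.  The page
 * size field is a log2 value from page_getenum(); e.g. assuming
 * HWRM_MAX_REQ_LEN is 128 bytes and 8 active VFs, 8 * 128 = 1024 bytes
 * rounds up to enum 12, i.e. one 4 KB page.
 */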
int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);

        req.req_buf_num_pages = rte_cpu_to_le_16(1);
        req.req_buf_page_size = rte_cpu_to_le_16(
                         page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
        req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
        req.req_buf_page_addr[0] =
                rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
        if (req.req_buf_page_addr[0] == 0) {
                RTE_LOG(ERR, PMD,
                        "unable to map buffer address to physical memory\n");
                return -ENOMEM;
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
{
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_cfg_input req = {0};
        int rc;

        HWRM_PREP(req, FUNC_CFG, -1, resp);
        req.fid = rte_cpu_to_le_16(0xffff);
        req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
        req.enables = rte_cpu_to_le_32(
                        HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
        req.async_event_cr = rte_cpu_to_le_16(
                        bp->def_cp_ring->cp_ring_struct->fw_ring_id);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
{
        struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_vf_cfg_input req = {0};
        int rc;

        HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
        req.enables = rte_cpu_to_le_32(
                        HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
        req.async_event_cr = rte_cpu_to_le_16(
                        bp->def_cp_ring->cp_ring_struct->fw_ring_id);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT;

        return rc;
}

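/*
 * VF-to-PF command forwarding: requests a VF is not allowed to issue
 * directly land in the PF's forwarding buffer, and the PF answers each one
 * either by executing it on the VF's behalf (bnxt_hwrm_exec_fwd_resp(),
 * below) or by rejecting it here.  Both paths encapsulate the original
 * request verbatim, so it must fit in encap_request.
 */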
int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
                              void *encaped, size_t ec_size)
{
        int rc = 0;
        struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
        struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

        if (ec_size > sizeof(req.encap_request))
                return -1;

        HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);

        req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
        memcpy(req.encap_request, encaped, ec_size);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
                                       struct ether_addr *mac)
{
        struct hwrm_func_qcfg_input req = {0};
        struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        HWRM_PREP(req, FUNC_QCFG, -1, resp);
        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
        return rc;
}

int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
                            void *encaped, size_t ec_size)
{
        int rc = 0;
        struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
        struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

        if (ec_size > sizeof(req.encap_request))
                return -1;

        HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);

        req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
        memcpy(req.encap_request, encaped, ec_size);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

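/*
 * Port statistics are DMA'd by the firmware into the host buffers whose
 * physical addresses were stored in hw_tx_port_stats_map and
 * hw_rx_port_stats_map; a PORT_QSTATS command just triggers the snapshot.
 * Both calls below are no-ops unless BNXT_FLAG_PORT_STATS was negotiated.
 */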
int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
        struct hwrm_port_qstats_input req = {0};
        struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_pf_info *pf = &bp->pf;
        int rc;

        if (!(bp->flags & BNXT_FLAG_PORT_STATS))
                return 0;

        HWRM_PREP(req, PORT_QSTATS, -1, resp);
        req.port_id = rte_cpu_to_le_16(pf->port_id);
        req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
        req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT;
        return rc;
}

int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
{
        struct hwrm_port_clr_stats_input req = {0};
        struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_pf_info *pf = &bp->pf;
        int rc;

        if (!(bp->flags & BNXT_FLAG_PORT_STATS))
                return 0;

        HWRM_PREP(req, PORT_CLR_STATS, -1, resp);
        req.port_id = rte_cpu_to_le_16(pf->port_id);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT;
        return rc;
}