[dpdk.git] / drivers / net / bnxt / bnxt_hwrm.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT                2000

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

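/*
 * Round a size up to one of the page-size exponents the hardware supports
 * (16B, 4KB, 8KB, 64KB, 2MB, 4MB, 1GB).  page_getenum() returns the log2
 * exponent, page_roundup() the rounded-up size itself.  For example,
 * page_getenum(6000) is 13, so page_roundup(6000) is 8192.
 */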
static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * ChiMP fails the HWRM command.
 */

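/*
 * Send a request over the HWRM channel: the request is written into the
 * BAR0 communication window one 32-bit word at a time, the remainder of
 * the window is zeroed, and the doorbell at BAR0 offset 0x100 is rung.
 * Completion is detected by polling the last byte of the response buffer
 * for HWRM_RESP_VALID_KEY, which the firmware writes only after the rest
 * of the response has been DMAed.  The caller must hold bp->hwrm_lock.
 */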
static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
                                        uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < bp->max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <=
                                bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                RTE_LOG(ERR, PMD, "Error sending msg %x\n",
                        req->req_type);
                return -1;
        }
        return 0;
}

151
152 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
153 {
154         int rc;
155
156         rte_spinlock_lock(&bp->hwrm_lock);
157         rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
158         rte_spinlock_unlock(&bp->hwrm_lock);
159         return rc;
160 }
161
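/*
 * HWRM_PREP() fills in the common request header: it clears the response
 * buffer, stamps the request type, the completion ring, a fresh sequence
 * id, the broadcast target id (0xffff) and the DMA address the firmware
 * should write the response to.  It must be invoked exactly once per
 * request, since it consumes a sequence number.
 */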
#define HWRM_PREP(req, type, cr, resp) \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(cr); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)

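/*
 * HWRM_CHECK_RESULT expects the local variables 'rc' (the send status)
 * and 'resp' (the response buffer) to be in scope.  It returns from the
 * enclosing function with -1 on a send/timeout failure and with the
 * positive HWRM error code if the firmware rejected the command.
 */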
#define HWRM_CHECK_RESULT \
        { \
                if (rc) { \
                        RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
                                __func__, rc); \
                        return rc; \
                } \
                if (resp->error_code) { \
                        rc = rte_le_to_cpu_16(resp->error_code); \
                        RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
                        return rc; \
                } \
        }

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME add multicast flag, when multicast adding options is supported
         * by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
                                    mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_clear_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = -1;

        return 0;
}

int bnxt_hwrm_set_filter(struct bnxt *bp,
                         struct bnxt_vnic_info *vnic,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        int i;

        HWRM_PREP(req, FUNC_QCAPS, -1, resp);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        if (bp->pf.vf_info == NULL) {
                                RTE_LOG(ERR, PMD,
                                        "Fail to alloc VF info array\n");
                                return -ENOMEM;
                        }
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        RTE_LOG(ERR, PMD,
                                        "Fail to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp))
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET, -1, resp);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));
        }

        /*
         * Forward all async events.  The memset must come before the
         * explicit bit set below, or it would silently overwrite it.
         */
        memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
        req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

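/*
 * Query the firmware's HWRM interface version and negotiate buffer sizes.
 * If the firmware advertises a larger maximum response length than the
 * driver's current buffer, the response buffer is reallocated (and its
 * DMA mapping refreshed) under bp->hwrm_lock, which is why this function
 * calls bnxt_hwrm_send_message_locked() directly.
 */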
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];

        HWRM_PREP(req, VER_GET, -1, resp);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        /*
         * Hold the lock since we may be adjusting the response pointers.
         */
        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));

        /*
         * Open-coded result check: HWRM_CHECK_RESULT would return with
         * hwrm_lock held; go through the unlock path instead.
         */
        if (rc) {
                RTE_LOG(ERR, PMD, "%s failed rc:%d\n", __func__, rc);
                goto error;
        }
        if (resp->error_code) {
                rc = rte_le_to_cpu_16(resp->error_code);
                RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc);
                goto error;
        }

        RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj, resp->hwrm_intf_min,
                resp->hwrm_intf_upd,
                resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
        bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
                        (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
        RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj << 16;
        fw_version |= resp->hwrm_intf_min << 8;
        fw_version |= resp->hwrm_intf_upd;

        if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
                RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is newer than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "The driver may be missing features.\n");
                } else {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is older than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                RTE_LOG(ERR, PMD, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = resp->max_req_win_len;
        max_resp_len = resp->max_resp_len;
        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        RTE_LOG(ERR, PMD,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

error:
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

        if (conf->link_up) {
                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        req.auto_mode |= conf->auto_mode;
                        enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                        req.auto_link_speed_mask = conf->auto_link_speed_mask;
                        enables |=
                           HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                        req.auto_link_speed = bp->link_info.auto_link_speed;
                        enables |=
                                HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
                }
                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                RTE_LOG(INFO, PMD, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        link_info->phy_link_status = resp->link;
        if (link_info->phy_link_status !=
            HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK) {
                link_info->link_up = 1;
                link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        } else {
                link_info->link_up = 0;
                link_info->link_speed = 0;
        }
        link_info->duplex = resp->duplex;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id)
{
        int rc = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC, -1, resp);

        req.enables = rte_cpu_to_le_32(0);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = bp->cos_queue[0].id;
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id =
                    rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
                req.length = rte_cpu_to_le_32(ring->ring_size);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                req.length = rte_cpu_to_le_32(ring->ring_size);
                break;
        default:
                RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                return -1;
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE, -1, resp);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE, -1, resp);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        /* Check before HWRM_PREP so an early return does not burn a seq_id */
        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
                             struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);

        req.update_period_ms = rte_cpu_to_le_32(1000);

        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
        bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

        return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
                            struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE, -1, resp);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
        bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

        return rc;
}

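/*
 * Allocate a VNIC in firmware.  The driver first binds the [start_grp_id,
 * end_grp_id] range of ring groups to the VNIC and derives the MRU from
 * the current MTU plus the Ethernet header, CRC and one VLAN tag; the
 * RSS, CoS and LB rules start out unset (HWRM_NA_SIGNATURE).
 */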
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
                if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
                        RTE_LOG(ERR, PMD,
                                "Not enough ring groups avail:%x req:%x\n", j,
                                (vnic->end_grp_id - vnic->start_grp_id) + 1);
                        break;
                }
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        }
        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                ETHER_CRC_LEN + VLAN_TAG_SIZE;
        HWRM_PREP(req, VNIC_ALLOC, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
                                        struct bnxt_vnic_info *vnic,
                                        struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        pmode->flags = rte_le_to_cpu_32(resp->flags);
        /* dflt_vnic bit doesn't exist in the _cfg command */
        pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
        pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
        pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
        pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
                                       struct bnxt_vnic_info *vnic,
                                       struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
        req.flags = rte_cpu_to_le_32(pmode->flags);
        req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
        req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
        req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
        req.enables = rte_cpu_to_le_32(
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
        );

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
        struct bnxt_plcmodes_cfg pmodes;

        rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
        if (rc)
                return rc;

        HWRM_PREP(req, VNIC_CFG, -1, resp);

        /* Only RSS support for now TBD: COS & LB */
        req.enables =
            rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
                             HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
        if (vnic->lb_rule != 0xffff)
                ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
        if (vnic->cos_rule != 0xffff)
                ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
        if (vnic->rss_rule != 0xffff)
                ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
        req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
        req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
        req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
        req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
        req.mru = rte_cpu_to_le_16(vnic->mru);
        if (vnic->func_default)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
        if (vnic->vlan_strip)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
        if (vnic->bd_stall)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
        if (vnic->roce_dual)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
        if (vnic->roce_only)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
        if (vnic->rss_dflt_cr)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

        return rc;
}

int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
                int16_t fw_vf_id)
{
        int rc = 0;
        struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_QCFG, -1, resp);

        req.enables =
                rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.vf_id = rte_cpu_to_le_16(fw_vf_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
        vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
        vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
        vnic->mru = rte_le_to_cpu_16(resp->mru);
        vnic->func_default = rte_le_to_cpu_32(
                        resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
        vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
        vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
        vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
        vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
        vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

        return rc;
}

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);

        return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

        req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->rss_rule = INVALID_HW_RING_ID;

        return rc;
}

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_free_input req = {.req_type = 0 };
        struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, VNIC_FREE, -1, resp);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
                           struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

        req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

        req.ring_grp_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_table_dma_addr);
        req.hash_key_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
        req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
{
        struct hwrm_func_cfg_input req = {0};
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        /* Prepare the header first so field assignments cannot be clobbered */
        HWRM_PREP(req, FUNC_CFG, -1, resp);

        req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
        req.enables = rte_cpu_to_le_32(
                        HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT;

        bp->pf.vf_info[vf].random_mac = false;

        return rc;
}

/*
 * HWRM utility functions
 */

int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_clear(bp, cpr);
                if (rc)
                        return rc;
        }
        return 0;
}

int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        int rc;
        unsigned int i;
        struct bnxt_cp_ring_info *cpr;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (i >= bp->rx_cp_nr_rings)
                        cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
                else
                        cpr = bp->rx_queues[i]->cp_ring;
                if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
                        rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
                        if (rc)
                                return rc;
                }
        }
        return 0;
}

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;
                unsigned int idx = i + 1;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
        uint16_t i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
                        RTE_LOG(ERR, PMD,
                                "Attempt to free invalid ring group %d\n",
                                idx);
                        continue;
                }

                rc = bnxt_hwrm_ring_grp_free(bp, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

static void bnxt_free_cp_ring(struct bnxt *bp,
                              struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

        bnxt_hwrm_ring_free(bp, cp_ring,
                        HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
        cp_ring->fw_ring_id = INVALID_HW_RING_ID;
        bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
        memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
                        sizeof(*cpr->cp_desc_ring));
        cpr->cp_raw_cons = 0;
}

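/*
 * Tear down every firmware ring: TX rings first, then RX rings, each
 * followed by its completion ring, and finally the default completion
 * ring (group index 0).  The host-side descriptor memory is zeroed and
 * the producer/consumer indices reset so the rings can be reallocated.
 */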
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq = bp->tx_queues[i];
                struct bnxt_tx_ring_info *txr = txq->tx_ring;
                struct bnxt_ring *ring = txr->tx_ring_struct;
                struct bnxt_cp_ring_info *cpr = txq->cp_ring;
                unsigned int idx = bp->rx_cp_nr_rings + i + 1;

                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
                                        HWRM_RING_FREE_INPUT_RING_TYPE_TX);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        memset(txr->tx_desc_ring, 0,
                                        txr->tx_ring_struct->ring_size *
                                        sizeof(*txr->tx_desc_ring));
                        memset(txr->tx_buf_ring, 0,
                                        txr->tx_ring_struct->ring_size *
                                        sizeof(*txr->tx_buf_ring));
                        txr->tx_prod = 0;
                        txr->tx_cons = 0;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr, idx);
        }

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                struct bnxt_rx_queue *rxq = bp->rx_queues[i];
                struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
                struct bnxt_ring *ring = rxr->rx_ring_struct;
                struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
                unsigned int idx = i + 1;

                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
                                        HWRM_RING_FREE_INPUT_RING_TYPE_RX);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
                        memset(rxr->rx_desc_ring, 0,
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_desc_ring));
                        memset(rxr->rx_buf_ring, 0,
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_buf_ring));
                        rxr->rx_prod = 0;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr, idx);
        }

        /* Default completion ring */
        {
                struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr, 0);
        }

        return rc;
}

int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
        uint16_t i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
                    bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
                        continue;

                rc = bnxt_hwrm_ring_grp_alloc(bp, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

void bnxt_free_hwrm_resources(struct bnxt *bp)
{
        /* Release the HWRM response buffer */
        rte_free(bp->hwrm_cmd_resp_addr);
        bp->hwrm_cmd_resp_addr = NULL;
        bp->hwrm_cmd_resp_dma_addr = 0;
}

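/*
 * Allocate the DMA-able HWRM response buffer.  The allocation is named
 * after the PCI address so multiple ports get distinct buffers, locked
 * into memory, and its physical address is handed to firmware in every
 * request via HWRM_PREP().
 */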
int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
        struct rte_pci_device *pdev = bp->pdev;
        char type[RTE_MEMZONE_NAMESIZE];

        sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        bp->max_req_len = HWRM_MAX_REQ_LEN;
        bp->max_resp_len = HWRM_MAX_RESP_LEN;
        bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
        if (bp->hwrm_cmd_resp_addr == NULL)
                return -ENOMEM;
        rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
        bp->hwrm_cmd_resp_dma_addr =
                rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
        if (bp->hwrm_cmd_resp_dma_addr == 0) {
                RTE_LOG(ERR, PMD,
                        "unable to map response address to physical memory\n");
                return -ENOMEM;
        }
        rte_spinlock_init(&bp->hwrm_lock);

        return 0;
}

int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter;
        int rc = 0;

        STAILQ_FOREACH(filter, &vnic->filter, next) {
                rc = bnxt_hwrm_clear_filter(bp, filter);
                if (rc)
                        break;
        }
        return rc;
}

int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter;
        int rc = 0;

        STAILQ_FOREACH(filter, &vnic->filter, next) {
                rc = bnxt_hwrm_set_filter(bp, vnic, filter);
                if (rc)
                        break;
        }
        return rc;
}

void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
        struct bnxt_vnic_info *vnic;
        unsigned int i;

        if (bp->vnic_info == NULL)
                return;

        vnic = &bp->vnic_info[0];
        if (BNXT_PF(bp))
                bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);

        /* VNIC resources */
        for (i = 0; i < bp->nr_vnics; i++) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                bnxt_clear_hwrm_vnic_filters(bp, vnic);

                bnxt_hwrm_vnic_ctx_free(bp, vnic);
                bnxt_hwrm_vnic_free(bp, vnic);
        }
        /* Ring resources */
        bnxt_free_all_hwrm_rings(bp);
        bnxt_free_all_hwrm_ring_grps(bp);
        bnxt_free_all_hwrm_stat_ctxs(bp);
}

1441 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1442 {
1443         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1444
1445         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1446                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1447
1448         switch (conf_link_speed) {
1449         case ETH_LINK_SPEED_10M_HD:
1450         case ETH_LINK_SPEED_100M_HD:
1451                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1452         }
1453         return hw_link_duplex;
1454 }
1455
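/*
 * Map a single DPDK speed to the HWRM encoding.  HWRM expresses link
 * speed in units of 100 Mbps, and the FORCE_LINK_SPEED_* and
 * AUTO_LINK_SPEED_* macros carry the same numeric values, so the mix
 * of the two families below is benign.
 */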
1456 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1457 {
1458         uint16_t eth_link_speed = 0;
1459
1460         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1461                 return ETH_LINK_SPEED_AUTONEG;
1462
1463         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1464         case ETH_LINK_SPEED_100M:
1465         case ETH_LINK_SPEED_100M_HD:
1466                 eth_link_speed =
1467                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
1468                 break;
1469         case ETH_LINK_SPEED_1G:
1470                 eth_link_speed =
1471                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
1472                 break;
1473         case ETH_LINK_SPEED_2_5G:
1474                 eth_link_speed =
1475                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
1476                 break;
1477         case ETH_LINK_SPEED_10G:
1478                 eth_link_speed =
1479                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
1480                 break;
1481         case ETH_LINK_SPEED_20G:
1482                 eth_link_speed =
1483                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
1484                 break;
1485         case ETH_LINK_SPEED_25G:
1486                 eth_link_speed =
1487                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
1488                 break;
1489         case ETH_LINK_SPEED_40G:
1490                 eth_link_speed =
1491                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
1492                 break;
1493         case ETH_LINK_SPEED_50G:
1494                 eth_link_speed =
1495                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
1496                 break;
1497         default:
1498                 RTE_LOG(ERR, PMD,
1499                         "Unsupported link speed %u; default to AUTO\n",
1500                         conf_link_speed);
1501                 break;
1502         }
1503         return eth_link_speed;
1504 }
1505
1506 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1507                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1508                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1509                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1510
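/*
 * Validate the link_speeds bitmap from rte_eth_conf.  When
 * ETH_LINK_SPEED_FIXED is set, exactly one supported speed bit may
 * remain; the (one_speed & (one_speed - 1)) test rejects any value
 * that is not a power of two.  For illustration (not code from this
 * file):
 *
 *     dev_conf.link_speeds = ETH_LINK_SPEED_FIXED | ETH_LINK_SPEED_10G;
 *
 * is accepted, while FIXED combined with two speed bits fails with
 * -EINVAL.
 */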
1511 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1512 {
1513         uint32_t one_speed;
1514
1515         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1516                 return 0;
1517
1518         if (link_speed & ETH_LINK_SPEED_FIXED) {
1519                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1520
1521                 if (one_speed & (one_speed - 1)) {
1522                         RTE_LOG(ERR, PMD,
1523                                 "Invalid advertised speeds (%u) for port %u\n",
1524                                 link_speed, port_id);
1525                         return -EINVAL;
1526                 }
1527                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1528                         RTE_LOG(ERR, PMD,
1529                                 "Unsupported advertised speed (%u) for port %u\n",
1530                                 link_speed, port_id);
1531                         return -EINVAL;
1532                 }
1533         } else {
1534                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1535                         RTE_LOG(ERR, PMD,
1536                                 "Unsupported advertised speeds (%u) for port %u\n",
1537                                 link_speed, port_id);
1538                         return -EINVAL;
1539                 }
1540         }
1541         return 0;
1542 }
1543
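/*
 * Convert the advertised-speeds bitmap into the HWRM
 * auto_link_speed_mask.  AUTONEG expands to every supported speed,
 * and 100M and 100M_HD both map to the single 100MB mask bit.
 */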
1544 static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
1545 {
1546         uint16_t ret = 0;
1547
1548         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1549                 link_speed = BNXT_SUPPORTED_SPEEDS;
1550
1551         if (link_speed & ETH_LINK_SPEED_100M)
1552                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1553         if (link_speed & ETH_LINK_SPEED_100M_HD)
1554                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1555         if (link_speed & ETH_LINK_SPEED_1G)
1556                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1557         if (link_speed & ETH_LINK_SPEED_2_5G)
1558                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1559         if (link_speed & ETH_LINK_SPEED_10G)
1560                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1561         if (link_speed & ETH_LINK_SPEED_20G)
1562                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1563         if (link_speed & ETH_LINK_SPEED_25G)
1564                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1565         if (link_speed & ETH_LINK_SPEED_40G)
1566                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1567         if (link_speed & ETH_LINK_SPEED_50G)
1568                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
1569         return ret;
1570 }
1571
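/*
 * Reverse map the HWRM speed encoding to ETH_SPEED_NUM_*.  The 2GB
 * encoding falls through to the error default because DPDK defines no
 * 2 Gbps speed constant.
 */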
1572 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1573 {
1574         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1575
1576         switch (hw_link_speed) {
1577         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1578                 eth_link_speed = ETH_SPEED_NUM_100M;
1579                 break;
1580         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1581                 eth_link_speed = ETH_SPEED_NUM_1G;
1582                 break;
1583         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1584                 eth_link_speed = ETH_SPEED_NUM_2_5G;
1585                 break;
1586         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1587                 eth_link_speed = ETH_SPEED_NUM_10G;
1588                 break;
1589         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1590                 eth_link_speed = ETH_SPEED_NUM_20G;
1591                 break;
1592         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1593                 eth_link_speed = ETH_SPEED_NUM_25G;
1594                 break;
1595         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1596                 eth_link_speed = ETH_SPEED_NUM_40G;
1597                 break;
1598         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1599                 eth_link_speed = ETH_SPEED_NUM_50G;
1600                 break;
1601         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1602         default:
1603                 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
1604                         hw_link_speed);
1605                 break;
1606         }
1607         return eth_link_speed;
1608 }
1609
1610 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1611 {
1612         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1613
1614         switch (hw_link_duplex) {
1615         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1616         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1617                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1618                 break;
1619         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1620                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
1621                 break;
1622         default:
1623                 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
1624                         hw_link_duplex);
1625                 break;
1626         }
1627         return eth_link_duplex;
1628 }
1629
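/*
 * Query the PHY and translate the result into rte_eth_link fields.
 * Hypothetical caller sketch (illustration only, not part of this
 * driver):
 *
 *     struct rte_eth_link link = { 0 };
 *
 *     if (bnxt_get_hwrm_link_config(bp, &link) == 0 && link.link_status)
 *             RTE_LOG(INFO, PMD, "link up at %u\n", link.link_speed);
 */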
1630 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
1631 {
1632         int rc = 0;
1633         struct bnxt_link_info *link_info = &bp->link_info;
1634
1635         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
1636         if (rc) {
1637                 RTE_LOG(ERR, PMD,
1638                         "Get link config failed with rc %d\n", rc);
1639                 goto exit;
1640         }
1641         if (link_info->link_up)
1642                 link->link_speed =
1643                         bnxt_parse_hw_link_speed(link_info->link_speed);
1644         else
1645                 link->link_speed = ETH_LINK_SPEED_10M;
1646         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
1647         link->link_status = link_info->link_up;
1648         link->link_autoneg = link_info->auto_mode ==
1649                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
1650                 ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;
1651 exit:
1652         return rc;
1653 }
1654
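/*
 * Build a PORT_PHY_CFG request from dev_conf->link_speeds.  A parsed
 * speed of 0 selects autonegotiation (RESTART_AUTONEG plus an
 * advertised-speed mask); any other value forces that speed.  NPAR
 * PFs and VFs cannot reconfigure the PHY, so for them this returns 0
 * without touching the hardware.
 */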
1655 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
1656 {
1657         int rc = 0;
1658         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1659         struct bnxt_link_info link_req;
1660         uint16_t speed;
1661
1662         if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
1663                 return 0;
1664
1665         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
1666                         bp->eth_dev->data->port_id);
1667         if (rc)
1668                 goto error;
1669
1670         memset(&link_req, 0, sizeof(link_req));
1671         link_req.link_up = link_up;
1672         if (!link_up)
1673                 goto port_phy_cfg;
1674
1675         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
1676         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
1677         if (speed == 0) {
1678                 link_req.phy_flags |=
1679                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
1680                 link_req.auto_mode =
1681                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
1682                 link_req.auto_link_speed_mask =
1683                         bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
1684         } else {
1685                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
1686                 link_req.link_speed = speed;
1687                 RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
1688         }
1689         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
1690         link_req.auto_pause = bp->link_info.auto_pause;
1691         link_req.force_pause = bp->link_info.force_pause;
1692
1693 port_phy_cfg:
1694         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
1695         if (rc) {
1696                 RTE_LOG(ERR, PMD,
1697                         "Set link config failed with rc %d\n", rc);
1698         }
1699
1700         rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
1701 error:
1702         return rc;
1703 }
1704
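/*
 * Query this function's own configuration (fid 0xffff addresses the
 * calling function) and cache the default VLAN and the NPAR port
 * partition type.
 */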
1705 /* JIRA 22088 */
1706 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
1707 {
1708         struct hwrm_func_qcfg_input req = {0};
1709         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1710         int rc = 0;
1711
1712         HWRM_PREP(req, FUNC_QCFG, -1, resp);
1713         req.fid = rte_cpu_to_le_16(0xffff);
1714
1715         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1716
1717         HWRM_CHECK_RESULT;
1718
1719         /* Hard-coded 0xfff VLAN ID mask */
1720         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
1721
1722         switch (resp->port_partition_type) {
1723         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
1724         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
1725         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
1726                 bp->port_partition_type = resp->port_partition_type;
1727                 break;
1728         default:
1729                 bp->port_partition_type = 0;
1730                 break;
1731         }
1732
1733         return rc;
1734 }
1735
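/*
 * Fallback used when FUNC_QCAPS for a VF fails: synthesize a qcaps
 * response from the FUNC_CFG request we just sent, so the PF resource
 * accounting in reserve_resources_from_vf() still balances.
 */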
1736 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
1737                                    struct hwrm_func_qcaps_output *qcaps)
1738 {
1739         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
1740         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
1741                sizeof(qcaps->mac_address));
1742         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
1743         qcaps->max_rx_rings = fcfg->num_rx_rings;
1744         qcaps->max_tx_rings = fcfg->num_tx_rings;
1745         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
1746         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
1747         qcaps->max_vfs = 0;
1748         qcaps->first_vf_id = 0;
1749         qcaps->max_vnics = fcfg->num_vnics;
1750         qcaps->max_decap_records = 0;
1751         qcaps->max_encap_records = 0;
1752         qcaps->max_tx_wm_flows = 0;
1753         qcaps->max_tx_em_flows = 0;
1754         qcaps->max_rx_wm_flows = 0;
1755         qcaps->max_rx_em_flows = 0;
1756         qcaps->max_flow_id = 0;
1757         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
1758         qcaps->max_sp_tx_rings = 0;
1759         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
1760 }
1761
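/*
 * Program the PF itself (fid 0xffff) via FUNC_CFG.  MTU and MRU are
 * the device MTU plus Ethernet header, CRC and VLAN tag overhead;
 * tx_rings is a parameter because VF allocation first shrinks the PF
 * to a single TX ring and later re-expands it.
 */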
1762 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
1763 {
1764         struct hwrm_func_cfg_input req = {0};
1765         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1766         int rc;
1767
1768         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
1769                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
1770                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
1771                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
1772                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1773                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
1774                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
1775                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1776                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
1777                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
1778         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
1779         req.mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1780                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
1781         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1782                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
1783         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
1784         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
1785         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
1786         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
1787         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
1788         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
1789         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
1790         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
1791         req.fid = rte_cpu_to_le_16(0xffff);
1792
1793         HWRM_PREP(req, FUNC_CFG, -1, resp);
1794
1795         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1796         HWRM_CHECK_RESULT;
1797
1798         return rc;
1799 }
1800
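/*
 * Split the PF's resource maxima evenly between the PF and its VFs,
 * hence the (num_vfs + 1) divisor.  For example, with max_tx_rings ==
 * 16 and num_vfs == 3, each of the four functions is offered 4 TX
 * rings; integer division silently discards any remainder.
 */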
1801 static void populate_vf_func_cfg_req(struct bnxt *bp,
1802                                      struct hwrm_func_cfg_input *req,
1803                                      int num_vfs)
1804 {
1805         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
1806                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
1807                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
1808                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
1809                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1810                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
1811                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
1812                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1813                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
1814                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
1815
1816         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1817                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
1818         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1819                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
1820         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
1821                                                 (num_vfs + 1));
1822         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
1823         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
1824                                                (num_vfs + 1));
1825         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
1826         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
1827         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
1828         /* TODO: For now, do not support VMDq/RFS on VFs. */
1829         req->num_vnics = rte_cpu_to_le_16(1);
1830         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
1831                                                  (num_vfs + 1));
1832 }
1833
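/*
 * If the VF's firmware-provided default MAC address is all zeroes,
 * generate a random one and record that fact in vf_info.
 */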
1834 static void add_random_mac_if_needed(struct bnxt *bp,
1835                                      struct hwrm_func_cfg_input *cfg_req,
1836                                      int vf)
1837 {
1838         struct ether_addr mac;
1839
1840         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
1841                 return;
1842
1843         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
1844                 cfg_req->enables |=
1845                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1846                 eth_random_addr(cfg_req->dflt_mac_addr);
1847                 bp->pf.vf_info[vf].random_mac = true;
1848         } else {
1849                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
1850         }
1851 }
1852
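/*
 * After configuring a VF, read back what the firmware actually
 * granted it (FUNC_QCAPS) and subtract those amounts from the PF's
 * pool; if the query fails, fall back to the values we asked for.
 */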
1853 static void reserve_resources_from_vf(struct bnxt *bp,
1854                                       struct hwrm_func_cfg_input *cfg_req,
1855                                       int vf)
1856 {
1857         struct hwrm_func_qcaps_input req = {0};
1858         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1859         int rc;
1860
1861         /* Get the actual allocated values now */
1862         HWRM_PREP(req, FUNC_QCAPS, -1, resp);
1863         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1864         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1865
1866         if (rc) {
1867                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
1868                 copy_func_cfg_to_qcaps(cfg_req, resp);
1869         } else if (resp->error_code) {
1870                 rc = rte_le_to_cpu_16(resp->error_code);
1871                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
1872                 copy_func_cfg_to_qcaps(cfg_req, resp);
1873         }
1874
1875         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
1876         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
1877         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
1878         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
1879         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
1880         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
1881         /*
1882          * TODO: VMDq is not yet supported with VFs, so max_vnics is
1883          * always forced to 1 and is not reserved from the PF pool here.
1884          */
1885         /* bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics); */
1886         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
1887 }
1888
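/*
 * Refresh the PF's view of its own allocation after VF provisioning.
 * Per the TODO below, only the TX ring count and EVB mode are taken
 * from the response.
 */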
1889 static int update_pf_resource_max(struct bnxt *bp)
1890 {
1891         struct hwrm_func_qcfg_input req = {0};
1892         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1893         int rc;
1894
1895         /* And copy the allocated numbers into the pf struct */
1896         HWRM_PREP(req, FUNC_QCFG, -1, resp);
1897         req.fid = rte_cpu_to_le_16(0xffff);
1898         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1899         HWRM_CHECK_RESULT;
1900
1901         /* Only TX ring value reflects actual allocation? TODO */
1902         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
1903         bp->pf.evb_mode = resp->evb_mode;
1904
1905         return rc;
1906 }
1907
1908 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
1909 {
1910         int rc;
1911
1912         if (!BNXT_PF(bp)) {
1913                 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
1914                 return -1;
1915         }
1916
1917         rc = bnxt_hwrm_func_qcaps(bp);
1918         if (rc)
1919                 return rc;
1920
1921         bp->pf.func_cfg_flags &=
1922                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
1923                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
1924         bp->pf.func_cfg_flags |=
1925                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
1926         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
1927         return rc;
1928 }
1929
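/*
 * VF provisioning sequence: query capabilities, shrink the PF to one
 * TX ring so the VFs can be satisfied, register the forwarded-request
 * buffer, FUNC_CFG each VF (only successfully configured VFs are
 * counted in active_vfs), then re-expand the PF and refresh its
 * resource maxima.
 */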
1930 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
1931 {
1932         struct hwrm_func_cfg_input req = {0};
1933         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1934         int i;
1935         size_t sz;
1936         int rc = 0;
1937         size_t req_buf_sz;
1938
1939         if (!BNXT_PF(bp)) {
1940                 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
1941                 return -1;
1942         }
1943
1944         rc = bnxt_hwrm_func_qcaps(bp);
1945
1946         if (rc)
1947                 return rc;
1948
1949         bp->pf.active_vfs = num_vfs;
1950
1951         /*
1952          * First, configure the PF to only use one TX ring.  This ensures that
1953          * there are enough rings for all VFs.
1954          *
1955          * If we don't do this, when we call func_alloc() later, we will lock
1956          * extra rings to the PF that won't be available during func_cfg() of
1957          * the VFs.
1958          *
1959          * This has been fixed with firmware versions above 20.6.54.
1960          */
1961         bp->pf.func_cfg_flags &=
1962                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
1963                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
1964         bp->pf.func_cfg_flags |=
1965                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
1966         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
1967         if (rc)
1968                 return rc;
1969
1970         /*
1971          * Now, create and register a buffer to hold forwarded VF requests
1972          */
1973         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
1974         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
1975                 page_roundup(req_buf_sz));
1976         if (bp->pf.vf_req_buf == NULL) {
1977                 rc = -ENOMEM;
1978                 goto error_free;
1979         }
1980         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
1981                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
1982         for (i = 0; i < num_vfs; i++)
1983                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
1984                                         (i * HWRM_MAX_REQ_LEN);
1985
1986         rc = bnxt_hwrm_func_buf_rgtr(bp);
1987         if (rc)
1988                 goto error_free;
1989
1990         populate_vf_func_cfg_req(bp, &req, num_vfs);
1991
1992         bp->pf.active_vfs = 0;
1993         for (i = 0; i < num_vfs; i++) {
1994                 add_random_mac_if_needed(bp, &req, i);
1995
1996                 HWRM_PREP(req, FUNC_CFG, -1, resp);
1997                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
1998                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
1999                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2000
2001                 /* Clear enable flag for next pass */
2002                 req.enables &= ~rte_cpu_to_le_32(
2003                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2004
2005                 if (rc || resp->error_code) {
2006                         RTE_LOG(ERR, PMD,
2007                                 "Failed to initialize VF %d\n", i);
2008                         RTE_LOG(ERR, PMD,
2009                                 "Not all VFs available. (%d, %d)\n",
2010                                 rc, rte_le_to_cpu_16(resp->error_code));
2011                         break;
2012                 }
2013
2014                 reserve_resources_from_vf(bp, &req, i);
2015                 bp->pf.active_vfs++;
2016         }
2017
2018         /*
2019          * Now configure the PF to use "the rest" of the resources.
2020          * We use STD_TX_RING_MODE here, which limits the number of TX
2021          * rings; this allows QoS to function properly.  Not setting the
2022          * flag would cause the PF rings to break bandwidth settings.
2023          */
2024         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2025         if (rc)
2026                 goto error_free;
2027
2028         rc = update_pf_resource_max(bp);
2029         if (rc)
2030                 goto error_free;
2031
2032         return rc;
2033
2034 error_free:
2035         bnxt_hwrm_func_buf_unrgtr(bp);
2036         return rc;
2037 }
2038
2039
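/*
 * Register the VF request-forwarding buffer with the firmware as a
 * single "page" whose size is the power of two covering
 * active_vfs * HWRM_MAX_REQ_LEN; the matching rte_malloc alignment in
 * bnxt_hwrm_allocate_vfs() is what keeps the buffer inside one such
 * page.
 */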
2040 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2041 {
2042         int rc = 0;
2043         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2044         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2045
2046         HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);
2047
2048         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2049         req.req_buf_page_size = rte_cpu_to_le_16(
2050                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2051         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2052         req.req_buf_page_addr[0] =
2053                 rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
2054         if (req.req_buf_page_addr[0] == 0) {
2055                 RTE_LOG(ERR, PMD,
2056                         "unable to map buffer address to physical memory\n");
2057                 return -ENOMEM;
2058         }
2059
2060         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2061
2062         HWRM_CHECK_RESULT;
2063
2064         return rc;
2065 }
2066
2067 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2068 {
2069         int rc = 0;
2070         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2071         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2072
2073         HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);
2074
2075         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2076
2077         HWRM_CHECK_RESULT;
2078
2079         return rc;
2080 }
2081
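/*
 * Point firmware async event notifications at the default completion
 * ring.  The PF uses FUNC_CFG below; the VF variant that follows uses
 * FUNC_VF_CFG for the same purpose.
 */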
2082 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2083 {
2084         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2085         struct hwrm_func_cfg_input req = {0};
2086         int rc;
2087
2088         HWRM_PREP(req, FUNC_CFG, -1, resp);
2089         req.fid = rte_cpu_to_le_16(0xffff);
2090         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2091         req.enables = rte_cpu_to_le_32(
2092                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2093         req.async_event_cr = rte_cpu_to_le_16(
2094                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2095         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2096         HWRM_CHECK_RESULT;
2097
2098         return rc;
2099 }
2100
2101 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2102 {
2103         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2104         struct hwrm_func_vf_cfg_input req = {0};
2105         int rc;
2106
2107         HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
2108         req.enables = rte_cpu_to_le_32(
2109                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2110         req.async_event_cr = rte_cpu_to_le_16(
2111                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2112         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2113         HWRM_CHECK_RESULT;
2114
2115         return rc;
2116 }
2117
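/*
 * Reject a VF request that the firmware forwarded to the PF: the
 * encapsulated request is returned to the function identified by
 * target_id.  It must fit in the fixed-size encap_request array.
 */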
2118 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2119                               void *encaped, size_t ec_size)
2120 {
2121         int rc = 0;
2122         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2123         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2124
2125         if (ec_size > sizeof(req.encap_request))
2126                 return -1;
2127
2128         HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);
2129
2130         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2131         memcpy(req.encap_request, encaped, ec_size);
2132
2133         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2134
2135         HWRM_CHECK_RESULT;
2136
2137         return rc;
2138 }
2139
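/* Fetch the firmware's default MAC address for the given VF. */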
2140 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2141                                        struct ether_addr *mac)
2142 {
2143         struct hwrm_func_qcfg_input req = {0};
2144         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2145         int rc;
2146
2147         HWRM_PREP(req, FUNC_QCFG, -1, resp);
2148         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2149         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2150
2151         HWRM_CHECK_RESULT;
2152
2153         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2154         return rc;
2155 }
2156
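/*
 * Counterpart to the reject path: ask the firmware to execute the
 * forwarded request on behalf of the function identified by
 * target_id.
 */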
2157 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2158                             void *encaped, size_t ec_size)
2159 {
2160         int rc = 0;
2161         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2162         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2163
2164         if (ec_size > sizeof(req.encap_request))
2165                 return -1;
2166
2167         HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
2168
2169         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2170         memcpy(req.encap_request, encaped, ec_size);
2171
2172         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2173
2174         HWRM_CHECK_RESULT;
2175
2176         return rc;
2177 }