net/bnxt: fix VLAN antispoof configuration code
[dpdk.git] / drivers / net / bnxt / bnxt_hwrm.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT                2000
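/*
 * The response poll below sleeps 600us per iteration, so this iteration
 * count bounds the worst-case wait at roughly 2000 * 600us = 1.2 seconds
 * per command.
 */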

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}
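
/*
 * Example (illustrative values): page_roundup(4096) returns 4096 (1 << 12),
 * while page_roundup(6000) rounds up to the next supported size,
 * 8192 (1 << 13).
 */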

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * command is rejected by the ChiMP.
 */

static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
                                        uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };

        if (bp->flags & BNXT_FLAG_SHORT_CMD) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->max_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                /* Sync memory write before updating doorbell */
                rte_wmb();

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <=
                                bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
                        req->req_type);
                goto err_ret;
        }
        return 0;

err_ret:
        return -1;
}

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
{
        int rc;

        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}

#define HWRM_PREP(req, type, cr, resp) \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(cr); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)

#define HWRM_CHECK_RESULT \
        { \
                if (rc) { \
                        RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
                                __func__, rc); \
                        return rc; \
                } \
                if (resp->error_code) { \
                        rc = rte_le_to_cpu_16(resp->error_code); \
                        if (resp->resp_len >= 16) { \
                                struct hwrm_err_output *tmp_hwrm_err_op = \
                                                        (void *)resp; \
                                RTE_LOG(ERR, PMD, \
                                        "%s error %d:%d:%08x:%04x\n", \
                                        __func__, \
                                        rc, tmp_hwrm_err_op->cmd_err, \
                                        rte_le_to_cpu_32(\
                                                tmp_hwrm_err_op->opaque_0), \
                                        rte_le_to_cpu_16(\
                                                tmp_hwrm_err_op->opaque_1)); \
                        } \
                        else { \
                                RTE_LOG(ERR, PMD, \
                                        "%s error %d\n", __func__, rc); \
                        } \
                        return rc; \
                } \
        }
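
/*
 * Usage sketch for the two macros above (illustrative only; FOO is not a
 * real HWRM command):
 *
 *      struct hwrm_foo_input req = {.req_type = 0 };
 *      struct hwrm_foo_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *      HWRM_PREP(req, FOO, -1, resp);
 *      req.some_field = rte_cpu_to_le_16(value);
 *      rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *      HWRM_CHECK_RESULT;
 */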

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME add multicast flag, when multicast adding options is supported
         * by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->flags & BNXT_VNIC_INFO_MCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_count && vlan_table) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                         rte_mem_virt2phy(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
                                    mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the
         * set_rx_mask list was used for anti-spoofing. In 1.8.0, the TX path
         * configuration was removed from the set_rx_mask call, and this
         * command was added.
         *
         * This command is also available in 1.7.8.11 and higher, as well as
         * in 1.7.8.0 exactly.
         */
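        /*
         * bp->fw_ver packs major.minor.build.rsvd into one 32-bit word (see
         * bnxt_hwrm_ver_get() below), so e.g. firmware 1.7.8.11 compares as
         * (1 << 24) | (7 << 16) | (8 << 8) | 11 == 0x0107080b.
         */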
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, -1, resp);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_mem_virt2phy(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_clear_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = -1;

        return 0;
}

int bnxt_hwrm_set_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        int i;

        HWRM_PREP(req, FUNC_QCAPS, -1, resp);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        RTE_LOG(ERR, PMD,
                                        "Fail to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                bp->pf.vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_as_table == NULL)
                                        RTE_LOG(ERR, PMD,
                                        "Alloc VLAN AS table for VF %d fail\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                               bp->pf.vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp))
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET, -1, resp);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));
        }

        req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */
        memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        HWRM_PREP(req, VER_GET, -1, resp);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        /*
         * Hold the lock since we may be adjusting the response pointers.
         */
        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj, resp->hwrm_intf_min,
                resp->hwrm_intf_upd,
                resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
        bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
                        (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
        RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj << 16;
        fw_version |= resp->hwrm_intf_min << 8;
        fw_version |= resp->hwrm_intf_upd;
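        /*
         * Both values pack maj.min.upd as (maj << 16) | (min << 8) | upd,
         * so e.g. interface version 1.7.7 compares as 0x010707.
         */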

        if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
                RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is newer than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "The driver may be missing features.\n");
                } else {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is older than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                RTE_LOG(ERR, PMD, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        max_resp_len = resp->max_resp_len;
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        RTE_LOG(ERR, PMD,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                RTE_LOG(DEBUG, PMD, "Short command supported\n");

                rte_free(bp->hwrm_short_cmd_req_addr);

                /*
                 * 'type' is only initialized in the response-resize path
                 * above, so rebuild it before naming this allocation.
                 */
                sprintf(type, "bnxt_hwrm_short_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);
                bp->hwrm_short_cmd_req_addr = rte_malloc(type,
                                                        bp->max_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == 0) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        RTE_LOG(ERR, PMD,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }

                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

error:
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;
        uint32_t link_speed_mask =
                HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;

        HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

        if (conf->link_up) {
                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        req.auto_mode = conf->auto_mode;
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                        if (conf->auto_mode ==
                            HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) {
                                req.auto_link_speed_mask =
                                        conf->auto_link_speed_mask;
                                enables |= link_speed_mask;
                        }
                        if (bp->link_info.auto_link_speed) {
                                req.auto_link_speed =
                                        bp->link_info.auto_link_speed;
                                enables |=
                                HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
                        }
                }
                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                RTE_LOG(INFO, PMD, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
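
/*
 * Token pasting makes GET_QUEUE_INFO(0) expand to (illustrative):
 *
 *      bp->cos_queue[0].id = resp->queue_id0;
 *      bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 */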

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
        int rc = 0;
        uint32_t enables = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC, -1, resp);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);
        req.length = rte_cpu_to_le_32(ring->ring_size);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = bp->cos_queue[0].id;
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                break;
        default:
                RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                return -1;
        }
        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE, -1, resp);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE, -1, resp);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);

        req.update_period_ms = rte_cpu_to_le_32(0);

        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

        return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE, -1, resp);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
                vnic->start_grp_id, vnic->end_grp_id);
        for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                ETHER_CRC_LEN + VLAN_TAG_SIZE;
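        /* e.g. a 1500-byte MTU yields mru = 1500 + 14 + 4 + 4 = 1522 */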
        HWRM_PREP(req, VNIC_ALLOC, -1, resp);

        if (vnic->func_default)
                req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
                                        struct bnxt_vnic_info *vnic,
                                        struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        pmode->flags = rte_le_to_cpu_32(resp->flags);
        /* dflt_vnic bit doesn't exist in the _cfg command */
        pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
        pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
        pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
        pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
                                       struct bnxt_vnic_info *vnic,
                                       struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
        req.flags = rte_cpu_to_le_32(pmode->flags);
        req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
        req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
        req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
        req.enables = rte_cpu_to_le_32(
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
        );

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
        struct bnxt_plcmodes_cfg pmodes;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
                return rc;
        }

        rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
        if (rc)
                return rc;

        HWRM_PREP(req, VNIC_CFG, -1, resp);

        /* Only RSS supported for now; TBD: COS & LB */
1110         req.enables =
1111             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
1112                              HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
1113         if (vnic->lb_rule != 0xffff)
1114                 ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1115         if (vnic->cos_rule != 0xffff)
1116                 ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1117         if (vnic->rss_rule != 0xffff)
1118                 ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1119         req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1120         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1121         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1122         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1123         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1124         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1125         req.mru = rte_cpu_to_le_16(vnic->mru);
1126         if (vnic->func_default)
1127                 req.flags |=
1128                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1129         if (vnic->vlan_strip)
1130                 req.flags |=
1131                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1132         if (vnic->bd_stall)
1133                 req.flags |=
1134                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1135         if (vnic->roce_dual)
1136                 req.flags |= rte_cpu_to_le_32(
1137                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1138         if (vnic->roce_only)
1139                 req.flags |= rte_cpu_to_le_32(
1140                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1141         if (vnic->rss_dflt_cr)
1142                 req.flags |= rte_cpu_to_le_32(
1143                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1144
1145         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1146
1147         HWRM_CHECK_RESULT;
1148
1149         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1150
1151         return rc;
1152 }
1153
1154 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1155                 int16_t fw_vf_id)
1156 {
1157         int rc = 0;
1158         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1159         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1160
1161         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1162                 RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1163                 return rc;
1164         }
1165         HWRM_PREP(req, VNIC_QCFG, -1, resp);
1166
1167         req.enables =
1168                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1169         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1170         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1171
1172         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1173
1174         HWRM_CHECK_RESULT;
1175
1176         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1177         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1178         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1179         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1180         vnic->mru = rte_le_to_cpu_16(resp->mru);
1181         vnic->func_default = rte_le_to_cpu_32(
1182                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1183         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1184                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1185         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1186                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1187         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1188                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1189         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1190                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1191         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1192                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1193
1194         return rc;
1195 }
1196
1197 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1198 {
1199         int rc = 0;
1200         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1201         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1202                                                 bp->hwrm_cmd_resp_addr;
1203
1204         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);
1205
1206         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1207
1208         HWRM_CHECK_RESULT;
1209
1210         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1211         RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
1212
1213         return rc;
1214 }
1215
1216 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1217 {
1218         int rc = 0;
1219         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1220         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1221                                                 bp->hwrm_cmd_resp_addr;
1222
1223         if (vnic->rss_rule == 0xffff) {
1224                 RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
1225                 return rc;
1226         }
1227         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);
1228
1229         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1230
1231         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1232
1233         HWRM_CHECK_RESULT;
1234
1235         vnic->rss_rule = INVALID_HW_RING_ID;
1236
1237         return rc;
1238 }
1239
1240 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1241 {
1242         int rc = 0;
1243         struct hwrm_vnic_free_input req = {.req_type = 0 };
1244         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1245
1246         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1247                 RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1248                 return rc;
1249         }
1250
1251         HWRM_PREP(req, VNIC_FREE, -1, resp);
1252
1253         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1254
1255         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1256
1257         HWRM_CHECK_RESULT;
1258
1259         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1260         return rc;
1261 }
1262
1263 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1264                            struct bnxt_vnic_info *vnic)
1265 {
1266         int rc = 0;
1267         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1268         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1269
1270         HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);
1271
1272         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1273
1274         req.ring_grp_tbl_addr =
1275             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1276         req.hash_key_tbl_addr =
1277             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1278         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1279
1280         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1281
1282         HWRM_CHECK_RESULT;
1283
1284         return rc;
1285 }
1286
1287 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1288                         struct bnxt_vnic_info *vnic)
1289 {
1290         int rc = 0;
1291         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1292         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1293         uint16_t size;
1294
1295         HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
1296
1297         req.flags = rte_cpu_to_le_32(
1298                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1299
1300         req.enables = rte_cpu_to_le_32(
1301                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1302
1303         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1304         size -= RTE_PKTMBUF_HEADROOM;
1305
1306         req.jumbo_thresh = rte_cpu_to_le_16(size);
1307         req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1308
1309         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1310
1311         HWRM_CHECK_RESULT;
1312
1313         return rc;
1314 }
1315
1316 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1317                         struct bnxt_vnic_info *vnic, bool enable)
1318 {
1319         int rc = 0;
1320         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1321         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1322
1323         HWRM_PREP(req, VNIC_TPA_CFG, -1, resp);
1324
1325         if (enable) {
1326                 req.enables = rte_cpu_to_le_32(
1327                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1328                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1329                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1330                 req.flags = rte_cpu_to_le_32(
1331                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1332                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1333                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1334                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1335                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1336                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1337                 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1338                 req.max_agg_segs = rte_cpu_to_le_16(5);
1339                 req.max_aggs =
1340                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1341                 req.min_agg_len = rte_cpu_to_le_32(512);
1342         }
1343
1344         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1345
1346         HWRM_CHECK_RESULT;
1347
1348         return rc;
1349 }
1350
1351 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1352 {
1353         struct hwrm_func_cfg_input req = {0};
1354         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1355         int rc;
1356
1357         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1358         req.enables = rte_cpu_to_le_32(
1359                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1360         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1361         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1362
1363         HWRM_PREP(req, FUNC_CFG, -1, resp);
1364
1365         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1366         HWRM_CHECK_RESULT;
1367
1368         bp->pf.vf_info[vf].random_mac = false;
1369
1370         return rc;
1371 }
1372
1373 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1374                                   uint64_t *dropped)
1375 {
1376         int rc = 0;
1377         struct hwrm_func_qstats_input req = {.req_type = 0};
1378         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1379
1380         HWRM_PREP(req, FUNC_QSTATS, -1, resp);
1381
1382         req.fid = rte_cpu_to_le_16(fid);
1383
1384         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1385
1386         HWRM_CHECK_RESULT;
1387
1388         if (dropped)
1389                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1390
1391         return rc;
1392 }
1393
1394 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1395                           struct rte_eth_stats *stats)
1396 {
1397         int rc = 0;
1398         struct hwrm_func_qstats_input req = {.req_type = 0};
1399         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1400
1401         HWRM_PREP(req, FUNC_QSTATS, -1, resp);
1402
1403         req.fid = rte_cpu_to_le_16(fid);
1404
1405         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1406
1407         HWRM_CHECK_RESULT;
1408
1409         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1410         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1411         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1412         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1413         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1414         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1415
1416         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1417         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1418         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1419         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1420         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1421         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1422
1423         stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
1424         stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
1425
1426         stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
1427
1428         return rc;
1429 }
1430
1431 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1432 {
1433         int rc = 0;
1434         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1435         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1436
1437         HWRM_PREP(req, FUNC_CLR_STATS, -1, resp);
1438
1439         req.fid = rte_cpu_to_le_16(fid);
1440
1441         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1442
1443         HWRM_CHECK_RESULT;
1444
1445         return rc;
1446 }
1447
1448 /*
1449  * HWRM utility functions
1450  */
1451
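/*
 * Completion rings are indexed with the RX rings first, followed by the
 * TX rings; the helpers below walk both sets in a single loop.
 */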
1452 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1453 {
1454         unsigned int i;
1455         int rc = 0;
1456
1457         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1458                 struct bnxt_tx_queue *txq;
1459                 struct bnxt_rx_queue *rxq;
1460                 struct bnxt_cp_ring_info *cpr;
1461
1462                 if (i >= bp->rx_cp_nr_rings) {
1463                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1464                         cpr = txq->cp_ring;
1465                 } else {
1466                         rxq = bp->rx_queues[i];
1467                         cpr = rxq->cp_ring;
1468                 }
1469
1470                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1471                 if (rc)
1472                         return rc;
1473         }
1474         return 0;
1475 }
1476
1477 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1478 {
1479         int rc;
1480         unsigned int i;
1481         struct bnxt_cp_ring_info *cpr;
1482
1483         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1484
1485                 if (i >= bp->rx_cp_nr_rings)
1486                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1487                 else
1488                         cpr = bp->rx_queues[i]->cp_ring;
1489                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1490                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1491                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1492                         /*
1493                          * TODO: Need a better way to reset grp_info.stats_ctx
1494                          * for Rx rings only.  stats_ctx is not saved for Tx
1495                          * rings in grp_info.
1496                          */
1497                         bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
1498                         if (rc)
1499                                 return rc;
1500                 }
1501         }
1502         return 0;
1503 }
1504
1505 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1506 {
1507         unsigned int i;
1508         int rc = 0;
1509
1510         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1511                 struct bnxt_tx_queue *txq;
1512                 struct bnxt_rx_queue *rxq;
1513                 struct bnxt_cp_ring_info *cpr;
1514
1515                 if (i >= bp->rx_cp_nr_rings) {
1516                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1517                         cpr = txq->cp_ring;
1518                 } else {
1519                         rxq = bp->rx_queues[i];
1520                         cpr = rxq->cp_ring;
1521                 }
1522
1523                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1524
1525                 if (rc)
1526                         return rc;
1527         }
1528         return rc;
1529 }
1530
1531 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1532 {
1533         uint16_t idx;
1534         int rc = 0;
1535
1536         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1537
1538                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
1539                         RTE_LOG(ERR, PMD,
1540                                 "Attempt to free invalid ring group %d\n",
1541                                 idx);
1542                         continue;
1543                 }
1544
1545                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1546
1547                 if (rc)
1548                         return rc;
1549         }
1550         return rc;
1551 }
1552
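/*
 * Free a completion ring in firmware, then reset the host-side state:
 * invalidate the ring IDs, zero the descriptor memory, and rewind the
 * raw consumer index.
 */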
1553 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1554                                 unsigned int idx)
1555 {
1556         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1557
1558         bnxt_hwrm_ring_free(bp, cp_ring,
1559                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1560         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1561         bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
1562         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1563                         sizeof(*cpr->cp_desc_ring));
1564         cpr->cp_raw_cons = 0;
1565 }
1566
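/*
 * Ring group index layout: group 0 holds the default completion ring,
 * groups 1..rx_cp_nr_rings hold the RX rings, and the TX rings follow.
 */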
1567 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1568 {
1569         unsigned int i;
1570         int rc = 0;
1571
1572         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1573                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1574                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1575                 struct bnxt_ring *ring = txr->tx_ring_struct;
1576                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1577                 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
1578
1579                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1580                         bnxt_hwrm_ring_free(bp, ring,
1581                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1582                         ring->fw_ring_id = INVALID_HW_RING_ID;
1583                         memset(txr->tx_desc_ring, 0,
1584                                         txr->tx_ring_struct->ring_size *
1585                                         sizeof(*txr->tx_desc_ring));
1586                         memset(txr->tx_buf_ring, 0,
1587                                         txr->tx_ring_struct->ring_size *
1588                                         sizeof(*txr->tx_buf_ring));
1589                         txr->tx_prod = 0;
1590                         txr->tx_cons = 0;
1591                 }
1592                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1593                         bnxt_free_cp_ring(bp, cpr, idx);
1594                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1595                 }
1596         }
1597
1598         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1599                 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1600                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1601                 struct bnxt_ring *ring = rxr->rx_ring_struct;
1602                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1603                 unsigned int idx = i + 1;
1604
1605                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1606                         bnxt_hwrm_ring_free(bp, ring,
1607                                         HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1608                         ring->fw_ring_id = INVALID_HW_RING_ID;
1609                         bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1610                         memset(rxr->rx_desc_ring, 0,
1611                                         rxr->rx_ring_struct->ring_size *
1612                                         sizeof(*rxr->rx_desc_ring));
1613                         memset(rxr->rx_buf_ring, 0,
1614                                         rxr->rx_ring_struct->ring_size *
1615                                         sizeof(*rxr->rx_buf_ring));
1616                         rxr->rx_prod = 0;
1617                         memset(rxr->ag_buf_ring, 0,
1618                                         rxr->ag_ring_struct->ring_size *
1619                                         sizeof(*rxr->ag_buf_ring));
1620                         rxr->ag_prod = 0;
1621                 }
1622                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1623                         bnxt_free_cp_ring(bp, cpr, idx);
1624                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1625                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1626                 }
1627         }
1628
1629         /* Default completion ring */
1630         {
1631                 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1632
1633                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1634                         bnxt_free_cp_ring(bp, cpr, 0);
1635                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1636                 }
1637         }
1638
1639         return rc;
1640 }
1641
1642 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1643 {
1644         uint16_t i;
1645         int rc = 0;
1646
1647         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1648                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1649                 if (rc)
1650                         return rc;
1651         }
1652         return rc;
1653 }
1654
1655 void bnxt_free_hwrm_resources(struct bnxt *bp)
1656 {
1657         /* Free the HWRM command response and short command buffers */
1658         rte_free(bp->hwrm_cmd_resp_addr);
1659         rte_free(bp->hwrm_short_cmd_req_addr);
1660         bp->hwrm_cmd_resp_addr = NULL;
1661         bp->hwrm_short_cmd_req_addr = NULL;
1662         bp->hwrm_cmd_resp_dma_addr = 0;
1663         bp->hwrm_short_cmd_req_dma_addr = 0;
1664 }
1665
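/*
 * Allocate the DMA-able buffer used for HWRM responses, lock it in
 * memory, and record its physical address for use by the firmware.
 */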
1666 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1667 {
1668         struct rte_pci_device *pdev = bp->pdev;
1669         char type[RTE_MEMZONE_NAMESIZE];
1670
1671         sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1672                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1673         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1674         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1675         if (bp->hwrm_cmd_resp_addr == NULL)
1676                 return -ENOMEM;
1677         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1678         bp->hwrm_cmd_resp_dma_addr =
1679                 rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
1680         if (bp->hwrm_cmd_resp_dma_addr == 0) {
1681                 RTE_LOG(ERR, PMD,
1682                         "unable to map response address to physical memory\n");
1683                 return -ENOMEM;
1684         }
1685         rte_spinlock_init(&bp->hwrm_lock);
1686
1687         return 0;
1688 }
1689
1690 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1691 {
1692         struct bnxt_filter_info *filter;
1693         int rc = 0;
1694
1695         STAILQ_FOREACH(filter, &vnic->filter, next) {
1696                 rc = bnxt_hwrm_clear_filter(bp, filter);
1697                 if (rc)
1698                         break;
1699         }
1700         return rc;
1701 }
1702
1703 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1704 {
1705         struct bnxt_filter_info *filter;
1706         int rc = 0;
1707
1708         STAILQ_FOREACH(filter, &vnic->filter, next) {
1709                 rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
1710                 if (rc)
1711                         break;
1712         }
1713         return rc;
1714 }
1715
1716 void bnxt_free_tunnel_ports(struct bnxt *bp)
1717 {
1718         if (bp->vxlan_port_cnt)
1719                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1720                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1721         bp->vxlan_port = 0;
1722         if (bp->geneve_port_cnt)
1723                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1724                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1725         bp->geneve_port = 0;
1726 }
1727
1728 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1729 {
1730         struct bnxt_vnic_info *vnic;
1731         unsigned int i;
1732
1733         if (bp->vnic_info == NULL)
1734                 return;
1735
1736         vnic = &bp->vnic_info[0];
1737         if (BNXT_PF(bp))
1738                 bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
1739
1740         /* VNIC resources */
1741         for (i = 0; i < bp->nr_vnics; i++) {
1742                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1743
1744                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1745
1746                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1747
1748                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1749
1750                 bnxt_hwrm_vnic_free(bp, vnic);
1751         }
1752         /* Ring resources */
1753         bnxt_free_all_hwrm_rings(bp);
1754         bnxt_free_all_hwrm_ring_grps(bp);
1755         bnxt_free_all_hwrm_stat_ctxs(bp);
1756         bnxt_free_tunnel_ports(bp);
1757 }
1758
1759 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1760 {
1761         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1762
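        /*
         * ETH_LINK_SPEED_AUTONEG is zero, so a clear FIXED bit means
         * autonegotiation was requested.
         */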
1763         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1764                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1765
1766         switch (conf_link_speed) {
1767         case ETH_LINK_SPEED_10M_HD:
1768         case ETH_LINK_SPEED_100M_HD:
1769                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1770         }
1771         return hw_link_duplex;
1772 }
1773
1774 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1775 {
1776         uint16_t eth_link_speed = 0;
1777
1778         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1779                 return ETH_LINK_SPEED_AUTONEG;
1780
1781         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1782         case ETH_LINK_SPEED_100M:
1783         case ETH_LINK_SPEED_100M_HD:
1784                 eth_link_speed =
1785                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
1786                 break;
1787         case ETH_LINK_SPEED_1G:
1788                 eth_link_speed =
1789                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
1790                 break;
1791         case ETH_LINK_SPEED_2_5G:
1792                 eth_link_speed =
1793                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
1794                 break;
1795         case ETH_LINK_SPEED_10G:
1796                 eth_link_speed =
1797                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
1798                 break;
1799         case ETH_LINK_SPEED_20G:
1800                 eth_link_speed =
1801                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
1802                 break;
1803         case ETH_LINK_SPEED_25G:
1804                 eth_link_speed =
1805                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
1806                 break;
1807         case ETH_LINK_SPEED_40G:
1808                 eth_link_speed =
1809                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
1810                 break;
1811         case ETH_LINK_SPEED_50G:
1812                 eth_link_speed =
1813                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
1814                 break;
1815         default:
1816                 RTE_LOG(ERR, PMD,
1817                         "Unsupported link speed %u; defaulting to AUTO\n",
1818                         conf_link_speed);
1819                 break;
1820         }
1821         return eth_link_speed;
1822 }
1823
1824 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1825                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1826                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1827                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1828
1829 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1830 {
1831         uint32_t one_speed;
1832
1833         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1834                 return 0;
1835
1836         if (link_speed & ETH_LINK_SPEED_FIXED) {
1837                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1838
1839                 if (one_speed & (one_speed - 1)) {
1840                         RTE_LOG(ERR, PMD,
1841                                 "Invalid advertised speeds (%u) for port %u\n",
1842                                 link_speed, port_id);
1843                         return -EINVAL;
1844                 }
1845                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1846                         RTE_LOG(ERR, PMD,
1847                                 "Unsupported advertised speed (%u) for port %u\n",
1848                                 link_speed, port_id);
1849                         return -EINVAL;
1850                 }
1851         } else {
1852                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1853                         RTE_LOG(ERR, PMD,
1854                                 "Unsupported advertised speeds (%u) for port %u\n",
1855                                 link_speed, port_id);
1856                         return -EINVAL;
1857                 }
1858         }
1859         return 0;
1860 }
1861
1862 static uint16_t
1863 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
1864 {
1865         uint16_t ret = 0;
1866
1867         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
1868                 if (bp->link_info.support_speeds)
1869                         return bp->link_info.support_speeds;
1870                 link_speed = BNXT_SUPPORTED_SPEEDS;
1871         }
1872
1873         if (link_speed & ETH_LINK_SPEED_100M)
1874                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1875         if (link_speed & ETH_LINK_SPEED_100M_HD)
1876                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1877         if (link_speed & ETH_LINK_SPEED_1G)
1878                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1879         if (link_speed & ETH_LINK_SPEED_2_5G)
1880                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1881         if (link_speed & ETH_LINK_SPEED_10G)
1882                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1883         if (link_speed & ETH_LINK_SPEED_20G)
1884                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1885         if (link_speed & ETH_LINK_SPEED_25G)
1886                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1887         if (link_speed & ETH_LINK_SPEED_40G)
1888                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1889         if (link_speed & ETH_LINK_SPEED_50G)
1890                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
1891         return ret;
1892 }
1893
1894 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1895 {
1896         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1897
1898         switch (hw_link_speed) {
1899         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1900                 eth_link_speed = ETH_SPEED_NUM_100M;
1901                 break;
1902         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1903                 eth_link_speed = ETH_SPEED_NUM_1G;
1904                 break;
1905         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1906                 eth_link_speed = ETH_SPEED_NUM_2_5G;
1907                 break;
1908         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1909                 eth_link_speed = ETH_SPEED_NUM_10G;
1910                 break;
1911         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1912                 eth_link_speed = ETH_SPEED_NUM_20G;
1913                 break;
1914         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1915                 eth_link_speed = ETH_SPEED_NUM_25G;
1916                 break;
1917         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1918                 eth_link_speed = ETH_SPEED_NUM_40G;
1919                 break;
1920         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1921                 eth_link_speed = ETH_SPEED_NUM_50G;
1922                 break;
1923         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1924         default:
1925                 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
1926                         hw_link_speed);
1927                 break;
1928         }
1929         return eth_link_speed;
1930 }
1931
1932 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1933 {
1934         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1935
1936         switch (hw_link_duplex) {
1937         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1938         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1939                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1940                 break;
1941         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1942                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
1943                 break;
1944         default:
1945                 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
1946                         hw_link_duplex);
1947                 break;
1948         }
1949         return eth_link_duplex;
1950 }
1951
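/* Query the PHY state from firmware and translate it to rte_eth_link. */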
1952 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
1953 {
1954         int rc = 0;
1955         struct bnxt_link_info *link_info = &bp->link_info;
1956
1957         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
1958         if (rc) {
1959                 RTE_LOG(ERR, PMD,
1960                         "Get link config failed with rc %d\n", rc);
1961                 goto exit;
1962         }
1963         if (link_info->link_speed)
1964                 link->link_speed =
1965                         bnxt_parse_hw_link_speed(link_info->link_speed);
1966         else
1967                 link->link_speed = ETH_SPEED_NUM_NONE;
1968         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
1969         link->link_status = link_info->link_up;
1970         link->link_autoneg = link_info->auto_mode ==
1971                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
1972                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
1973 exit:
1974         return rc;
1975 }
1976
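/*
 * Validate the configured link speeds and program the PHY: a single
 * fixed speed is forced, anything else is autonegotiated using a speed
 * mask.  NPAR PFs and VFs cannot change the link, so they return early.
 */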
1977 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
1978 {
1979         int rc = 0;
1980         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1981         struct bnxt_link_info link_req;
1982         uint16_t speed;
1983
1984         if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
1985                 return 0;
1986
1987         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
1988                         bp->eth_dev->data->port_id);
1989         if (rc)
1990                 goto error;
1991
1992         memset(&link_req, 0, sizeof(link_req));
1993         link_req.link_up = link_up;
1994         if (!link_up)
1995                 goto port_phy_cfg;
1996
1997         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
1998         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
1999         if (speed == 0) {
2000                 link_req.phy_flags |=
2001                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2002                 link_req.auto_mode =
2003                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
2004                 link_req.auto_link_speed_mask =
2005                         bnxt_parse_eth_link_speed_mask(bp,
2006                                                        dev_conf->link_speeds);
2007         } else {
2008                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2009                 link_req.link_speed = speed;
2010                 RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
2011         }
2012         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2013         link_req.auto_pause = bp->link_info.auto_pause;
2014         link_req.force_pause = bp->link_info.force_pause;
2015
2016 port_phy_cfg:
2017         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2018         if (rc) {
2019                 RTE_LOG(ERR, PMD,
2020                         "Set link config failed with rc %d\n", rc);
2021         }
2022
2023 error:
2024         return rc;
2025 }
2026
2027 /* JIRA 22088 */
2028 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2029 {
2030         struct hwrm_func_qcfg_input req = {0};
2031         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2032         int rc = 0;
2033
2034         HWRM_PREP(req, FUNC_QCFG, -1, resp);
2035         req.fid = rte_cpu_to_le_16(0xffff);
2036
2037         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2038
2039         HWRM_CHECK_RESULT;
2040
2041         /* Hardcoded 0xfff VLAN ID mask */
2042         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2043
2044         switch (resp->port_partition_type) {
2045         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2046         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2047         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2048                 bp->port_partition_type = resp->port_partition_type;
2049                 break;
2050         default:
2051                 bp->port_partition_type = 0;
2052                 break;
2053         }
2054
2055         return rc;
2056 }
2057
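/*
 * Fallback for when FUNC_QCAPS fails for a VF: synthesize a qcaps
 * response from the resource counts that were requested via func_cfg.
 */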
2058 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2059                                    struct hwrm_func_qcaps_output *qcaps)
2060 {
2061         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2062         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2063                sizeof(qcaps->mac_address));
2064         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2065         qcaps->max_rx_rings = fcfg->num_rx_rings;
2066         qcaps->max_tx_rings = fcfg->num_tx_rings;
2067         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2068         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2069         qcaps->max_vfs = 0;
2070         qcaps->first_vf_id = 0;
2071         qcaps->max_vnics = fcfg->num_vnics;
2072         qcaps->max_decap_records = 0;
2073         qcaps->max_encap_records = 0;
2074         qcaps->max_tx_wm_flows = 0;
2075         qcaps->max_tx_em_flows = 0;
2076         qcaps->max_rx_wm_flows = 0;
2077         qcaps->max_rx_em_flows = 0;
2078         qcaps->max_flow_id = 0;
2079         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2080         qcaps->max_sp_tx_rings = 0;
2081         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2082 }
2083
2084 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2085 {
2086         struct hwrm_func_cfg_input req = {0};
2087         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2088         int rc;
2089
2090         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2091                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2092                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2093                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2094                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2095                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2096                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2097                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2098                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2099                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2100         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2101         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2102         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2103                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
2104         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2105         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2106         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2107         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2108         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2109         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2110         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2111         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2112         req.fid = rte_cpu_to_le_16(0xffff);
2113
2114         HWRM_PREP(req, FUNC_CFG, -1, resp);
2115
2116         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2117         HWRM_CHECK_RESULT;
2118
2119         return rc;
2120 }
2121
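/*
 * Divide the PF's resources evenly between the PF and its VFs: each
 * function gets max / (num_vfs + 1) of each resource type.  VNICs stay
 * at 1 since VMDq/RFS is not yet supported on VFs.
 */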
2122 static void populate_vf_func_cfg_req(struct bnxt *bp,
2123                                      struct hwrm_func_cfg_input *req,
2124                                      int num_vfs)
2125 {
2126         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2127                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2128                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2129                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2130                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2131                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2132                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2133                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2134                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2135                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2136
2137         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2138                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2139         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2140                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2141         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2142                                                 (num_vfs + 1));
2143         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2144         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2145                                                (num_vfs + 1));
2146         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2147         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2148         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2149         /* TODO: For now, do not support VMDq/RFS on VFs. */
2150         req->num_vnics = rte_cpu_to_le_16(1);
2151         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2152                                                  (num_vfs + 1));
2153 }
2154
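/*
 * If the VF's default MAC is all zeroes, generate a random address and
 * note that we did so; otherwise keep the firmware-assigned address.
 */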
2155 static void add_random_mac_if_needed(struct bnxt *bp,
2156                                      struct hwrm_func_cfg_input *cfg_req,
2157                                      int vf)
2158 {
2159         struct ether_addr mac;
2160
2161         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2162                 return;
2163
2164         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2165                 cfg_req->enables |=
2166                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2167                 eth_random_addr(cfg_req->dflt_mac_addr);
2168                 bp->pf.vf_info[vf].random_mac = true;
2169         } else {
2170                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2171         }
2172 }
2173
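/*
 * Query the resources the VF actually received and subtract them from
 * the PF's pool so later allocations see the true remainder.
 */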
2174 static void reserve_resources_from_vf(struct bnxt *bp,
2175                                       struct hwrm_func_cfg_input *cfg_req,
2176                                       int vf)
2177 {
2178         struct hwrm_func_qcaps_input req = {0};
2179         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2180         int rc;
2181
2182         /* Get the actual allocated values now */
2183         HWRM_PREP(req, FUNC_QCAPS, -1, resp);
2184         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2185         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2186
2187         if (rc) {
2188                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
2189                 copy_func_cfg_to_qcaps(cfg_req, resp);
2190         } else if (resp->error_code) {
2191                 rc = rte_le_to_cpu_16(resp->error_code);
2192                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
2193                 copy_func_cfg_to_qcaps(cfg_req, resp);
2194         }
2195
2196         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2197         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2198         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2199         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2200         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2201         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2202         /*
2203          * TODO: While VMDq is not supported with VFs, max_vnics is always
2204          * forced to 1 in that case, so do not adjust it here:
2205          * bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2206          */
2207         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2208 }
2209
2210 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2211 {
2212         struct hwrm_func_qcfg_input req = {0};
2213         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2214         int rc;
2215
2216         /* Check for zero MAC address */
2217         /* Query the VF's current default VLAN */
2218         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2219         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2220         if (rc) {
2221                 RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
2222                 return -1;
2223         } else if (resp->error_code) {
2224                 rc = rte_le_to_cpu_16(resp->error_code);
2225                 RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
2226                 return -1;
2227         }
2228         return rte_le_to_cpu_16(resp->vlan);
2229 }
2230
2231 static int update_pf_resource_max(struct bnxt *bp)
2232 {
2233         struct hwrm_func_qcfg_input req = {0};
2234         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2235         int rc;
2236
2237         /* And copy the allocated numbers into the pf struct */
2238         HWRM_PREP(req, FUNC_QCFG, -1, resp);
2239         req.fid = rte_cpu_to_le_16(0xffff);
2240         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2241         HWRM_CHECK_RESULT;
2242
2243         /* Only TX ring value reflects actual allocation? TODO */
2244         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2245         bp->pf.evb_mode = resp->evb_mode;
2246
2247         return rc;
2248 }
2249
2250 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2251 {
2252         int rc;
2253
2254         if (!BNXT_PF(bp)) {
2255                 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2256                 return -1;
2257         }
2258
2259         rc = bnxt_hwrm_func_qcaps(bp);
2260         if (rc)
2261                 return rc;
2262
2263         bp->pf.func_cfg_flags &=
2264                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2265                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2266         bp->pf.func_cfg_flags |=
2267                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2268         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2269         return rc;
2270 }
2271
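/*
 * Allocate and configure num_vfs VFs: shrink the PF to a single TX ring
 * so the VFs can be satisfied, register the VF request forwarding
 * buffer, configure each VF in turn, then return the leftover resources
 * to the PF.
 */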
2272 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2273 {
2274         struct hwrm_func_cfg_input req = {0};
2275         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2276         int i;
2277         size_t sz;
2278         int rc = 0;
2279         size_t req_buf_sz;
2280
2281         if (!BNXT_PF(bp)) {
2282                 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2283                 return -1;
2284         }
2285
2286         rc = bnxt_hwrm_func_qcaps(bp);
2287
2288         if (rc)
2289                 return rc;
2290
2291         bp->pf.active_vfs = num_vfs;
2292
2293         /*
2294          * First, configure the PF to only use one TX ring.  This ensures that
2295          * there are enough rings for all VFs.
2296          *
2297          * If we don't do this, when we call func_alloc() later, we will lock
2298          * extra rings to the PF that won't be available during func_cfg() of
2299          * the VFs.
2300          *
2301          * This has been fixed in firmware versions above 20.6.54.
2302          */
2303         bp->pf.func_cfg_flags &=
2304                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2305                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2306         bp->pf.func_cfg_flags |=
2307                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2308         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2309         if (rc)
2310                 return rc;
2311
2312         /*
2313          * Now, create and register a buffer to hold forwarded VF requests
2314          */
2315         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2316         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2317                 page_roundup(req_buf_sz));
2318         if (bp->pf.vf_req_buf == NULL) {
2319                 rc = -ENOMEM;
2320                 goto error_free;
2321         }
2322         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2323                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2324         for (i = 0; i < num_vfs; i++)
2325                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2326                                         (i * HWRM_MAX_REQ_LEN);
2327
2328         rc = bnxt_hwrm_func_buf_rgtr(bp);
2329         if (rc)
2330                 goto error_free;
2331
2332         populate_vf_func_cfg_req(bp, &req, num_vfs);
2333
2334         bp->pf.active_vfs = 0;
2335         for (i = 0; i < num_vfs; i++) {
2336                 add_random_mac_if_needed(bp, &req, i);
2337
2338                 HWRM_PREP(req, FUNC_CFG, -1, resp);
2339                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2340                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2341                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2342
2343                 /* Clear enable flag for next pass */
2344                 req.enables &= ~rte_cpu_to_le_32(
2345                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2346
2347                 if (rc || resp->error_code) {
2348                         RTE_LOG(ERR, PMD,
2349                                 "Failed to initialize VF %d\n", i);
2350                         RTE_LOG(ERR, PMD,
2351                                 "Not all VFs available. (%d, %d)\n",
2352                                 rc, resp->error_code);
2353                         break;
2354                 }
2355
2356                 reserve_resources_from_vf(bp, &req, i);
2357                 bp->pf.active_vfs++;
2358                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2359         }
2360
2361         /*
2362          * Now configure the PF to use "the rest" of the resources.
2363          * STD_TX_RING_MODE limits the number of TX rings but allows QoS to
2364          * function properly.  Without it, the PF rings would break the
2365          * bandwidth settings.
2366          */
2367         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2368         if (rc)
2369                 goto error_free;
2370
2371         rc = update_pf_resource_max(bp);
2372         if (rc)
2373                 goto error_free;
2374
2375         return rc;
2376
2377 error_free:
2378         bnxt_hwrm_func_buf_unrgtr(bp);
2379         return rc;
2380 }
2381
2382 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2383 {
2384         struct hwrm_func_cfg_input req = {0};
2385         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2386         int rc;
2387
2388         HWRM_PREP(req, FUNC_CFG, -1, resp);
2389
2390         req.fid = rte_cpu_to_le_16(0xffff);
2391         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2392         req.evb_mode = bp->pf.evb_mode;
2393
2394         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2395         HWRM_CHECK_RESULT;
2396
2397         return rc;
2398 }
2399
2400 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2401                                 uint8_t tunnel_type)
2402 {
2403         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2404         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2405         int rc = 0;
2406
2407         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, -1, resp);
2408         req.tunnel_type = tunnel_type;
2409         req.tunnel_dst_port_val = port;
2410         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2411         HWRM_CHECK_RESULT;
2412
2413         switch (tunnel_type) {
2414         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2415                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2416                 bp->vxlan_port = port;
2417                 break;
2418         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2419                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2420                 bp->geneve_port = port;
2421                 break;
2422         default:
2423                 break;
2424         }
2425         return rc;
2426 }
2427
2428 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2429                                 uint8_t tunnel_type)
2430 {
2431         struct hwrm_tunnel_dst_port_free_input req = {0};
2432         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2433         int rc = 0;
2434
2435         HWRM_PREP(req, TUNNEL_DST_PORT_FREE, -1, resp);
2436         req.tunnel_type = tunnel_type;
2437         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2438         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2439         HWRM_CHECK_RESULT;
2440
2441         return rc;
2442 }
2443
2444 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2445                                         uint32_t flags)
2446 {
2447         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2448         struct hwrm_func_cfg_input req = {0};
2449         int rc;
2450
2451         HWRM_PREP(req, FUNC_CFG, -1, resp);
2452         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2453         req.flags = rte_cpu_to_le_32(flags);
2454         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2455         HWRM_CHECK_RESULT;
2456
2457         return rc;
2458 }
2459
2460 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2461 {
2462         uint32_t *flag = flagp;
2463
2464         vnic->flags = *flag;
2465 }
2466
2467 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2468 {
2469         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2470 }
2471
2472 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2473 {
2474         int rc = 0;
2475         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2476         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2477
2478         HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);
2479
2480         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2481         req.req_buf_page_size = rte_cpu_to_le_16(
2482                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2483         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2484         req.req_buf_page_addr[0] =
2485                 rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
2486         if (req.req_buf_page_addr[0] == 0) {
2487                 RTE_LOG(ERR, PMD,
2488                         "unable to map buffer address to physical memory\n");
2489                 return -ENOMEM;
2490         }
2491
2492         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2493
2494         HWRM_CHECK_RESULT;
2495
2496         return rc;
2497 }
2498
2499 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2500 {
2501         int rc = 0;
2502         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2503         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2504
2505         HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);
2506
2507         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2508
2509         HWRM_CHECK_RESULT;
2510
2511         return rc;
2512 }
2513
2514 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2515 {
2516         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2517         struct hwrm_func_cfg_input req = {0};
2518         int rc;
2519
2520         HWRM_PREP(req, FUNC_CFG, -1, resp);
2521         req.fid = rte_cpu_to_le_16(0xffff);
2522         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2523         req.enables = rte_cpu_to_le_32(
2524                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2525         req.async_event_cr = rte_cpu_to_le_16(
2526                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2527         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2528         HWRM_CHECK_RESULT;
2529
2530         return rc;
2531 }
2532
2533 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2534 {
2535         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2536         struct hwrm_func_vf_cfg_input req = {0};
2537         int rc;
2538
2539         HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
2540         req.enables = rte_cpu_to_le_32(
2541                         HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2542         req.async_event_cr = rte_cpu_to_le_16(
2543                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2544         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2545         HWRM_CHECK_RESULT;
2546
2547         return rc;
2548 }
2549
2550 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2551 {
2552         struct hwrm_func_cfg_input req = {0};
2553         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2554         uint16_t dflt_vlan, fid;
2555         uint32_t func_cfg_flags;
2556         int rc = 0;
2557
2558         HWRM_PREP(req, FUNC_CFG, -1, resp);
2559
2560         if (is_vf) {
2561                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2562                 fid = bp->pf.vf_info[vf].fid;
2563                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2564         } else {
2565                 fid = 0xffff;
2566                 func_cfg_flags = bp->pf.func_cfg_flags;
2567                 dflt_vlan = bp->vlan;
2568         }
2569
2570         req.flags = rte_cpu_to_le_32(func_cfg_flags);
2571         req.fid = rte_cpu_to_le_16(fid);
2572         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2573         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2574
2575         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2576         HWRM_CHECK_RESULT;
2577
2578         return rc;
2579 }
2580
2581 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2582                         uint16_t max_bw, uint16_t enables)
2583 {
2584         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2585         struct hwrm_func_cfg_input req = {0};
2586         int rc;
2587
2588         HWRM_PREP(req, FUNC_CFG, -1, resp);
2589         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2590         req.enables |= rte_cpu_to_le_32(enables);
2591         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2592         req.max_bw = rte_cpu_to_le_32(max_bw);
2593         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2594         HWRM_CHECK_RESULT;
2595
2596         return rc;
2597 }
2598
2599 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2600 {
2601         struct hwrm_func_cfg_input req = {0};
2602         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2603         int rc = 0;
2604
2605         HWRM_PREP(req, FUNC_CFG, -1, resp);
2606         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2607         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2608         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2609         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2610
2611         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2612         HWRM_CHECK_RESULT;
2613
2614         return rc;
2615 }
2616
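/*
 * Reject a request that was forwarded from another function (e.g. a
 * VF) by handing the encapsulated original request back to the
 * firmware for an error completion.
 */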
2617 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2618                               void *encaped, size_t ec_size)
2619 {
2620         int rc = 0;
2621         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2622         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2623
2624         if (ec_size > sizeof(req.encap_request))
2625                 return -1;
2626
2627         HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);
2628
2629         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2630         memcpy(req.encap_request, encaped, ec_size);
2631
2632         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2633
2634         HWRM_CHECK_RESULT;
2635
2636         return rc;
2637 }
2638
2639 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2640                                        struct ether_addr *mac)
2641 {
2642         struct hwrm_func_qcfg_input req = {0};
2643         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2644         int rc;
2645
2646         HWRM_PREP(req, FUNC_QCFG, -1, resp);
2647         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2648         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2649
2650         HWRM_CHECK_RESULT;
2651
2652         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2653         return rc;
2654 }
2655
2656 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2657                             void *encaped, size_t ec_size)
2658 {
2659         int rc = 0;
2660         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2661         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2662
2663         if (ec_size > sizeof(req.encap_request))
2664                 return -1;
2665
2666         HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
2667
2668         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2669         memcpy(req.encap_request, encaped, ec_size);
2670
2671         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2672
2673         HWRM_CHECK_RESULT;
2674
2675         return rc;
2676 }
2677
2678 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2679                          struct rte_eth_stats *stats)
2680 {
2681         int rc = 0;
2682         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
2683         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
2684
2685         HWRM_PREP(req, STAT_CTX_QUERY, -1, resp);
2686
2687         req.stat_ctx_id = rte_cpu_to_le_32(cid);
2688
2689         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2690
2691         HWRM_CHECK_RESULT;
2692
2693         stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2694         stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2695         stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2696         stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2697         stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2698         stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2699
2700         stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2701         stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2702         stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2703         stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2704         stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2705         stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2706
2707         stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
2708         stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
2709         stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
2710
2711         return rc;
2712 }
2713
2714 int bnxt_hwrm_port_qstats(struct bnxt *bp)
2715 {
2716         struct hwrm_port_qstats_input req = {0};
2717         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2718         struct bnxt_pf_info *pf = &bp->pf;
2719         int rc;
2720
2721         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2722                 return 0;
2723
2724         HWRM_PREP(req, PORT_QSTATS, -1, resp);
2725         req.port_id = rte_cpu_to_le_16(pf->port_id);
2726         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
2727         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
2728         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2729         HWRM_CHECK_RESULT;
2730         return rc;
2731 }
2732
2733 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
2734 {
2735         struct hwrm_port_clr_stats_input req = {0};
2736         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2737         struct bnxt_pf_info *pf = &bp->pf;
2738         int rc;
2739
2740         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2741                 return 0;
2742
2743         HWRM_PREP(req, PORT_CLR_STATS, -1, resp);
2744         req.port_id = rte_cpu_to_le_16(pf->port_id);
2745         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2746         HWRM_CHECK_RESULT;
2747         return rc;
2748 }
2749
2750 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
2751 {
2752         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2753         struct hwrm_port_led_qcaps_input req = {0};
2754         int rc;
2755
2756         if (BNXT_VF(bp))
2757                 return 0;
2758
2759         HWRM_PREP(req, PORT_LED_QCAPS, -1, resp);
2760         req.port_id = bp->pf.port_id;
2761         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2762         HWRM_CHECK_RESULT;
2763
2764         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
2765                 unsigned int i;
2766
2767                 bp->num_leds = resp->num_leds;
2768                 memcpy(bp->leds, &resp->led0_id,
2769                         sizeof(bp->leds[0]) * bp->num_leds);
2770                 for (i = 0; i < bp->num_leds; i++) {
2771                         struct bnxt_led_info *led = &bp->leds[i];
2772
2773                         uint16_t caps = led->led_state_caps;
2774
2775                         if (!led->led_group_id ||
2776                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
2777                                 bp->num_leds = 0;
2778                                 break;
2779                         }
2780                 }
2781         }
2782         return rc;
2783 }
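
/*
 * Illustrative sketch: after bnxt_hwrm_port_led_qcaps() has run once at
 * init time, callers only need to test bp->num_leds; it is cleared above
 * whenever any LED lacks a group ID or alternate-blink capability.  The
 * helper below is hypothetical.
 */
#if 0
static bool example_led_blink_supported(struct bnxt *bp)
{
	return !BNXT_VF(bp) && bp->num_leds > 0;
}
#endif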
2784
2785 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
2786 {
2787         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2788         struct hwrm_port_led_cfg_input req = {0};
2789         struct bnxt_led_cfg *led_cfg;
	uint8_t led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_DEFAULT;
2791         uint16_t duration = 0;
2792         int rc, i;
2793
2794         if (!bp->num_leds || BNXT_VF(bp))
2795                 return -EOPNOTSUPP;
2796
2797         HWRM_PREP(req, PORT_LED_CFG, -1, resp);
2798         if (led_on) {
2799                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
2800                 duration = rte_cpu_to_le_16(500);
2801         }
	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
2803         req.num_leds = bp->num_leds;
2804         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
2805         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
2806                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
2807                 led_cfg->led_id = bp->leds[i].led_id;
2808                 led_cfg->led_state = led_state;
2809                 led_cfg->led_blink_on = duration;
2810                 led_cfg->led_blink_off = duration;
2811                 led_cfg->led_group_id = bp->leds[i].led_group_id;
2812         }
2813
2814         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2815         HWRM_CHECK_RESULT;
2816
2817         return rc;
2818 }
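
/*
 * Illustrative sketch: the ethdev LED callbacks reduce to thin wrappers
 * around bnxt_hwrm_port_led_cfg().  The signatures follow the standard
 * eth_dev led_on/led_off ops; the wrapper names are hypothetical.
 */
#if 0
static int example_dev_led_on_op(struct rte_eth_dev *dev)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

	/* Blink all capable LEDs with the 500ms on/off duty set above. */
	return bnxt_hwrm_port_led_cfg(bp, true);
}

static int example_dev_led_off_op(struct rte_eth_dev *dev)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;

	return bnxt_hwrm_port_led_cfg(bp, false);
}
#endif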
2819
2820 static void
2821 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
2822 {
2823         uint32_t *count = cbdata;
2824
2825         *count = *count + 1;
2826 }
2827
2828 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
2829                                      struct bnxt_vnic_info *vnic __rte_unused)
2830 {
2831         return 0;
2832 }
2833
2834 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
2835 {
2836         uint32_t count = 0;
2837
2838         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
2839             &count, bnxt_vnic_count_hwrm_stub);
2840
2841         return count;
2842 }
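
/*
 * Illustrative sketch: because the HWRM callback here is a stub, the walk
 * only counts VNICs and never reprograms them.  A caller gating a per-VF
 * operation on VNIC usage might do the following (hypothetical helper):
 */
#if 0
static bool example_vf_has_active_vnics(struct bnxt *bp, uint16_t vf)
{
	return bnxt_vf_vnic_count(bp, vf) > 0;
}
#endif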
2843
2844 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
2845                                         uint16_t *vnic_ids)
2846 {
2847         struct hwrm_func_vf_vnic_ids_query_input req = {0};
2848         struct hwrm_func_vf_vnic_ids_query_output *resp =
2849                                                 bp->hwrm_cmd_resp_addr;
2850         int rc;
2851
2852         /* First query all VNIC ids */
	HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, -1, resp);
2854
2855         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
2856         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
2857         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
2858
2859         if (req.vnic_id_tbl_addr == 0) {
		RTE_LOG(ERR, PMD,
			"unable to map VNIC ID table address to physical memory\n");
2862                 return -ENOMEM;
2863         }
2864         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2865         if (rc) {
2866                 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
2867                 return -1;
2868         } else if (resp->error_code) {
2869                 rc = rte_le_to_cpu_16(resp->error_code);
2870                 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
2871                 return -1;
2872         }
2873
2874         return rte_le_to_cpu_32(resp->vnic_id_cnt);
2875 }
2876
2877 /*
 * This function queries the VNIC IDs for a specified VF. It then calls
2879  * the vnic_cb to update the necessary field in vnic_info with cbdata.
2880  * Then it calls the hwrm_cb function to program this new vnic configuration.
2881  */
2882 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
2883         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
2884         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
2885 {
2886         struct bnxt_vnic_info vnic;
2887         int rc = 0;
2888         int i, num_vnic_ids;
2889         uint16_t *vnic_ids;
2890         size_t vnic_id_sz;
2891         size_t sz;
2892
2893         /* First query all VNIC ids */
2894         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
2895         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
2896                         RTE_CACHE_LINE_SIZE);
2897         if (vnic_ids == NULL) {
2898                 rc = -ENOMEM;
2899                 return rc;
2900         }
2901         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
2902                 rte_mem_lock_page(((char *)vnic_ids) + sz);
2903
	num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);

	if (num_vnic_ids < 0) {
		/* Free the ID table before propagating the error. */
		rte_free(vnic_ids);
		return num_vnic_ids;
	}
2908
	/*
	 * Retrieve each VNIC, let vnic_cb update it with cbdata, then
	 * program the new configuration through hwrm_cb.
	 */

2911         for (i = 0; i < num_vnic_ids; i++) {
2912                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
2913                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
2914                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
2915                 if (rc)
2916                         break;
2917                 if (vnic.mru <= 4)      /* Indicates unallocated */
2918                         continue;
2919
2920                 vnic_cb(&vnic, cbdata);
2921
2922                 rc = hwrm_cb(bp, &vnic);
2923                 if (rc)
2924                         break;
2925         }
2926
2927         rte_free(vnic_ids);
2928
2929         return rc;
2930 }
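
/*
 * Illustrative sketch of the callback pattern above: flip a boolean VNIC
 * attribute on every VNIC of a VF.  The bd_stall attribute is the one
 * named in the loop comment; using bnxt_hwrm_vnic_cfg() as the
 * programming callback is an assumption for illustration.
 */
#if 0
static void example_set_bd_stall(struct bnxt_vnic_info *vnic, void *cbdata)
{
	bool *on = cbdata;

	vnic->bd_stall = *on;
}

static int example_vf_set_bd_stall(struct bnxt *bp, uint16_t vf, bool on)
{
	return bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
			example_set_bd_stall, &on, bnxt_hwrm_vnic_cfg);
}
#endif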
2931
2932 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
2933                                               bool on)
2934 {
2935         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2936         struct hwrm_func_cfg_input req = {0};
2937         int rc;
2938
2939         HWRM_PREP(req, FUNC_CFG, -1, resp);
2940         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2941         req.enables |= rte_cpu_to_le_32(
2942                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
2943         req.vlan_antispoof_mode = on ?
2944                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
2945                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
2946         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2947         HWRM_CHECK_RESULT;
2948
2949         return rc;
2950 }
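
/*
 * Illustrative sketch: this helper backs the per-VF anti-spoof control
 * exposed by the PMD-specific API.  After validating the port and VF
 * index, rte_pmd_bnxt_set_vf_vlan_anti_spoof() calls down roughly as
 * follows (validation elided, sketch only):
 */
#if 0
static int example_set_vf_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
					  uint8_t on)
{
	return bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(bp, vf, on != 0);
}
#endif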
2951
2952 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
2953 {
2954         struct bnxt_vnic_info vnic;
2955         uint16_t *vnic_ids;
2956         size_t vnic_id_sz;
2957         int num_vnic_ids, i;
2958         size_t sz;
2959         int rc;
2960
2961         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
2962         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
2963                         RTE_CACHE_LINE_SIZE);
2964         if (vnic_ids == NULL) {
2965                 rc = -ENOMEM;
2966                 return rc;
2967         }
2968
2969         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
2970                 rte_mem_lock_page(((char *)vnic_ids) + sz);
2971
2972         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
2973         if (rc <= 0)
2974                 goto exit;
2975         num_vnic_ids = rc;
2976
2977         /*
2978          * Loop through to find the default VNIC ID.
2979          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
2980          * by sending the hwrm_func_qcfg command to the firmware.
2981          */
2982         for (i = 0; i < num_vnic_ids; i++) {
2983                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
2984                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
2985                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
2986                                         bp->pf.first_vf_id + vf);
2987                 if (rc)
2988                         goto exit;
2989                 if (vnic.func_default) {
2990                         rte_free(vnic_ids);
2991                         return vnic.fw_vnic_id;
2992                 }
2993         }
2994         /* Could not find a default VNIC. */
2995         RTE_LOG(ERR, PMD, "No default VNIC\n");
2996 exit:
2997         rte_free(vnic_ids);
2998         return -1;
2999 }
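
/*
 * Illustrative sketch: callers treat the negative return above as "no
 * default VNIC" and otherwise receive the firmware VNIC ID directly.
 * The helper and errno choice below are hypothetical.
 */
#if 0
static int example_get_vf_dflt_vnic(struct bnxt *bp, int vf)
{
	int vnic_id = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);

	if (vnic_id < 0)
		return -ENOENT;	/* VF has no default VNIC allocated */

	return vnic_id;
}
#endif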