net/bnxt: update trusted VF status only when it changes
drivers/net/bnxt/bnxt_hwrm.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT                6000000
#define HWRM_SHORT_CMD_TIMEOUT          50000
#define HWRM_SPEC_CODE_1_8_3            0x10803
#define HWRM_VERSION_1_9_1              0x10901
#define HWRM_VERSION_1_9_2              0x10903
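
/*
 * The spec-code/version constants above pack an HWRM interface version as
 * (major << 16) | (minor << 8) | update, matching bp->hwrm_spec_code as
 * assembled in bnxt_hwrm_ver_get() below; 0x10803, for example, is
 * interface version 1.8.3.
 */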

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}
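
/*
 * Example: for a 3000-byte allocation, page_getenum(3000) returns 12
 * (since 3000 <= 1 << 12), so page_roundup(3000) yields 4096, the smallest
 * supported page size that fits the request.
 *
 * The helper below points the firmware at a ring's backing store: rings
 * backed by more than one page pass the DMA address of the page table and
 * set *pg_attr to indicate one level of indirection; single-page rings pass
 * the lone page's DMA address directly, leaving *pg_attr as initialized by
 * the caller.
 */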

static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
                                  uint8_t *pg_attr,
                                  uint64_t *pg_dir)
{
        if (rmem->nr_pages > 1) {
                *pg_attr = 1;
                *pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
        } else {
                *pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
        }
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -ETIMEDOUT if
 * bnxt_hwrm_send_message() times out, and a negative errno (mapped from the
 * HWRM error code by HWRM_CHECK_RESULT()) if the command was rejected by
 * the ChiMP.
 */

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                  uint32_t msg_len, bool use_kong_mb)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };
        uint16_t bar_offset = use_kong_mb ?
                GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
        uint16_t mb_trigger_offset = use_kong_mb ?
                GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
        uint32_t timeout;

        /* Do not send HWRM commands to firmware in error state */
        if (bp->flags & BNXT_FLAG_FATAL_ERROR)
                return 0;

        /* For VER_GET command, set timeout as 50ms */
        if (rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
                timeout = HWRM_SHORT_CMD_TIMEOUT;
        else
                timeout = HWRM_CMD_TIMEOUT;

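        /*
         * Short command mode: instead of writing the whole request through
         * the BAR, copy it into the pre-mapped DMA buffer and write only a
         * small hwrm_short_input descriptor that points the firmware at it.
         */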
        if (bp->flags & BNXT_FLAG_SHORT_CMD ||
            msg_len > bp->max_req_len) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
        rte_write32(1, bar);
        /*
         * Make sure the channel doorbell write completes before reading the
         * response, to avoid getting stale or invalid responses.
         */
        rte_io_mb();

        /* Poll for the valid bit */
        for (i = 0; i < timeout; i++) {
                /* Sanity check on the resp->resp_len */
                rte_cio_rmb();
                if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(1);
        }

        if (i >= timeout) {
                /* Suppress VER_GET timeout messages during reset recovery */
                if (bp->flags & BNXT_FLAG_FW_RESET &&
                    rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
                        return -ETIMEDOUT;

                PMD_DRV_LOG(ERR, "Error(timeout) sending msg 0x%04x\n",
                            req->req_type);
                return -ETIMEDOUT;
        }
        return 0;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands.  It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() returns from the enclosing function on failure,
 * releasing the spinlock only in that case.  If a function does not use the
 * regular int return codes, HWRM_CHECK_RESULT() cannot be used directly;
 * copy and modify it to suit that function instead.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type, kong) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(-1); \
        req.seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
                rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT_SILENT() do {\
        if (rc) { \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_CHECK_RESULT() do {\
        if (rc) { \
                PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
                        rc = -ENOSPC; \
                else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
                        rc = -EINVAL; \
                else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
                        rc = -ENOTSUP; \
                else if (rc > 0) \
                        rc = -EIO; \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        PMD_DRV_LOG(ERR, \
                                "error %d:%d:%08x:%04x\n", \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        PMD_DRV_LOG(ERR, "error %d\n", rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
                        rc = -ENOSPC; \
                else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
                        rc = -EINVAL; \
                else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
                        rc = -ENOTSUP; \
                else if (rc > 0) \
                        rc = -EIO; \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)
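
/*
 * Typical use of the three macros above, as in the simple command wrappers
 * below (a sketch; "XYZ" stands in for a real HWRM command name):
 *
 *      struct hwrm_xyz_input req = {.req_type = 0 };
 *      struct hwrm_xyz_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *      HWRM_PREP(req, XYZ, BNXT_USE_CHIMP_MB);   (takes bp->hwrm_lock)
 *      ... fill in request fields ...
 *      rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 *      HWRM_CHECK_RESULT();   (on error: unlocks and returns from the caller)
 *      ... read fields from *resp while the lock is still held ...
 *      HWRM_UNLOCK();
 *      return rc;
 */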

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;

        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;

        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        } else if (vnic->flags & BNXT_VNIC_INFO_MCAST) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                         rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the set_rx_mask
         * list was used for anti-spoof. In 1.8.0, the TX path configuration was
         * removed from set_rx_mask call, and this command was added.
         *
         * This command is also present from 1.7.8.11 and higher,
         * as well as 1.7.8.0
         */
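        /*
         * bnxt_hwrm_ver_get() packs bp->fw_ver as
         * (major << 24) | (minor << 16) | (build << 8) | reserved, so the
         * checks below compare against 1.8.0 (0x01080000), 1.7.8.0
         * (0x01070800) and 1.7.8.11 (0x0107080b).
         */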
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct bnxt_filter_info *l2_filter = filter;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        if (filter->matching_l2_fltr_ptr)
                l2_filter = filter->matching_l2_fltr_ptr;

        PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
                    filter, l2_filter, l2_filter->l2_ref_cnt);

        if (l2_filter->l2_ref_cnt > 0)
                l2_filter->l2_ref_cnt--;

        if (l2_filter->l2_ref_cnt > 0)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = UINT64_MAX;

        return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        //TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                PMD_DRV_LOG(DEBUG,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;
        if (filter->pri_hint) {
                req.pri_hint = filter->pri_hint;
                req.l2_filter_id_hint =
                        rte_cpu_to_le_64(filter->l2_filter_id_hint);
        }

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_CFG, BNXT_USE_CHIMP_MB);

        if (ptp->rx_filter)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables = rte_cpu_to_le_32
                (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*      if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
        if (ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);

        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (!BNXT_CHIP_THOR(bp) &&
            !(resp->flags &
              HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) {
                HWRM_UNLOCK();
                return 0;
        }

        if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
                bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }

        if (!BNXT_CHIP_THOR(bp)) {
                ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
                ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
                ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
                ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
                ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
                ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
                ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
                ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
                ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
        }

        HWRM_UNLOCK();

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        return 0;
}

static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        int i;

        HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Fail to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                bp->pf.vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_as_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Alloc VLAN AS table for VF %d fail\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                               bp->pf.vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
        bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        if (!BNXT_CHIP_THOR(bp))
                bp->max_l2_ctx += bp->max_rx_em_flows;
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
                        HWRM_UNLOCK();
                        bnxt_hwrm_ptp_qcfg(bp);
                }
        }

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
                bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
                bp->flags |= BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
                PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
        } else {
                bp->flags &= ~BNXT_FLAG_FW_CAP_ERROR_RECOVERY;
        }

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
                bp->flags |= BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;
        else
                bp->flags &= ~BNXT_FLAG_FW_CAP_ERR_RECOVER_RELOAD;

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc;

        rc = __bnxt_hwrm_func_qcaps(bp);
        if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
                rc = bnxt_alloc_ctx_mem(bp);
                if (rc)
                        return rc;

                rc = bnxt_hwrm_func_resc_qcaps(bp);
                if (!rc)
                        bp->flags |= BNXT_FLAG_NEW_RM;
        }

        return rc;
}

/* VNIC cap covers capability of all VNICs. So no need to pass vnic_id */
int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_vnic_qcaps_input req = {.req_type = 0 };
        struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_QCAPS, BNXT_USE_CHIMP_MB);

        req.target_id = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (rte_le_to_cpu_32(resp->flags) &
            HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) {
                bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY;
                PMD_DRV_LOG(INFO, "CoS assignment capability enabled\n");
        }

        bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET, BNXT_USE_CHIMP_MB);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        uint32_t flags = 0;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
        if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;

        /* PFs and trusted VFs should indicate support for the Master
         * capability on non-Stingray platforms.
         */
        if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;

        HWRM_PREP(req, FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));

                /*
                 * PF can sniff HWRM API issued by VF. This can be set up by
                 * linux driver and inherited by the DPDK PF driver. Clear
                 * this HWRM sniffer list in FW because DPDK PF driver does
                 * not support this.
                 */
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE;
        }

        req.flags = rte_cpu_to_le_32(flags);

        req.async_event_fwd[0] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
        if (bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY)
                req.async_event_fwd[0] |=
                        rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
        req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
                                 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        flags = rte_le_to_cpu_32(resp->flags);
        if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
                bp->flags |= BNXT_FLAG_FW_CAP_IF_CHANGE;

        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
{
        if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
                return 0;

        return bnxt_hwrm_func_reserve_vf_resc(bp, true);
}

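/*
 * Reserve ring and context resources for a VF.  With "test" set, the
 * *_ASSETS_TEST flags ask the firmware to validate the reservation
 * (presumably without committing it); bnxt_hwrm_check_vf_rings() above uses
 * this to probe whether the requested resources are available.
 */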
int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
{
        int rc;
        uint32_t flags = 0;
        uint32_t enables;
        struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_vf_cfg_input req = {0};

        HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);

        enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;

        if (BNXT_HAS_RING_GRPS(bp)) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
                req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
        }

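        /*
         * Each Rx queue is backed by an Rx ring plus an aggregation ring,
         * hence the AGG_RING_MULTIPLIER on the Rx ring count; completion
         * rings cover Rx + Tx plus the async completion ring, if any.
         */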
        req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
        req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
                                            AGG_RING_MULTIPLIER);
        req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
                                              bp->tx_nr_rings +
                                              BNXT_NUM_ASYNC_CPR(bp));
        req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
        if (bp->vf_resv_strategy ==
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
                           HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
                           HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
                req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
                req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
                req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
        }

        if (test)
                flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;

        if (test && BNXT_HAS_RING_GRPS(bp))
                flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;

        req.flags = rte_cpu_to_le_32(flags);
        req.enables |= rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (test)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_resource_qcaps_input req = {0};

        HWRM_PREP(req, FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (BNXT_VF(bp)) {
                bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
                bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
                bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
                bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
                bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
                /* func_resource_qcaps does not return max_rx_em_flows.
                 * So use the value provided by func_qcaps.
                 */
                bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
                if (!BNXT_CHIP_THOR(bp))
                        bp->max_l2_ctx += bp->max_rx_em_flows;
                bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
                bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        }
        bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
        bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
        if (bp->vf_resv_strategy >
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
                bp->vf_resv_strategy =
                HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        HWRM_PREP(req, VER_GET, BNXT_USE_CHIMP_MB);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (bp->flags & BNXT_FLAG_FW_RESET)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
                resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
                resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
        bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
                     (resp->hwrm_fw_min_8b << 16) |
                     (resp->hwrm_fw_bld_8b << 8) |
                     resp->hwrm_fw_rsvd_8b;
        PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        fw_version = resp->hwrm_intf_maj_8b << 16;
        fw_version |= resp->hwrm_intf_min_8b << 8;
        fw_version |= resp->hwrm_intf_upd_8b;
        bp->hwrm_spec_code = fw_version;

        if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                PMD_DRV_LOG(ERR, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
        if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
                bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;

        max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                PMD_DRV_LOG(DEBUG, "Short command supported\n");
                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

        if (((dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
             (dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
            bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
                sprintf(type, "bnxt_hwrm_short_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr =
                                rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
                bp->flags |= BNXT_FLAG_KONG_MB_EN;
                PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
                PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
                bp->flags |= BNXT_FLAG_ADV_FLOW_MGMT;
                PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
        }

error:
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG, BNXT_USE_CHIMP_MB);

        if (conf->link_up) {
                /* Setting Fixed Speed. But AutoNeg is ON, So disable it */
                if (bp->link_info.auto_mode && conf->link_speed) {
                        req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                        PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
                }

                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        /* No speeds specified. Enable AutoNeg - all speeds */
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                }
                /* AutoNeg - Advertise speeds specified. */
                if (conf->auto_link_speed_mask &&
                    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                        req.auto_link_speed_mask =
                                conf->auto_link_speed_mask;
                        enables |=
                        HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                }

                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                PMD_DRV_LOG(INFO, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex_cfg;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
        link_info->phy_type = resp->phy_type;
        link_info->media_type = resp->media_type;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        HWRM_UNLOCK();

        PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
        PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
                    link_info->auto_link_speed_mask);
        PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
                    link_info->force_link_speed);

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
        int i;

get_rx_info:
        HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);

        req.flags = rte_cpu_to_le_32(dir);
        /* HWRM Version >= 1.9.1 */
        if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
                req.drv_qmap_cap =
                        HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
                GET_TX_QUEUE_INFO(0);
                GET_TX_QUEUE_INFO(1);
                GET_TX_QUEUE_INFO(2);
                GET_TX_QUEUE_INFO(3);
                GET_TX_QUEUE_INFO(4);
                GET_TX_QUEUE_INFO(5);
                GET_TX_QUEUE_INFO(6);
                GET_TX_QUEUE_INFO(7);
        } else {
                GET_RX_QUEUE_INFO(0);
                GET_RX_QUEUE_INFO(1);
                GET_RX_QUEUE_INFO(2);
                GET_RX_QUEUE_INFO(3);
                GET_RX_QUEUE_INFO(4);
                GET_RX_QUEUE_INFO(5);
                GET_RX_QUEUE_INFO(6);
                GET_RX_QUEUE_INFO(7);
        }

        HWRM_UNLOCK();

        if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX)
                goto done;

        if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
                bp->tx_cosq_id[0] = bp->tx_cos_queue[0].id;
        } else {
                int j;

                /* iterate and find the COSq profile to use for Tx */
                if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
                        for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
                                if (bp->tx_cos_queue[i].id != 0xff)
                                        bp->tx_cosq_id[j++] =
                                                bp->tx_cos_queue[i].id;
                        }
                } else {
                        for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
                                if (bp->tx_cos_queue[i].profile ==
                                        HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
                                        bp->tx_cosq_id[0] =
                                                bp->tx_cos_queue[i].id;
                                        break;
                                }
                        }
                }
        }

        bp->max_tc = resp->max_configurable_queues;
        bp->max_lltc = resp->max_configurable_lossless_queues;
        if (bp->max_tc > BNXT_MAX_QUEUE)
                bp->max_tc = BNXT_MAX_QUEUE;
        bp->max_q = bp->max_tc;

        if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
                dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX;
                goto get_rx_info;
        }

done:
        return rc;
}

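/*
 * Allocate a hardware ring of the given type.  Completion rings may be
 * bound to an NQ on chips that have them, Tx rings carry a CoS queue id,
 * and aggregation rings (and, on Thor, Rx rings) also report the buffer
 * size the PMD will post.
 */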
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id, uint32_t cmpl_ring_id,
                         uint16_t tx_cosq_id)
{
        int rc = 0;
        uint32_t enables = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_mempool *mb_pool;
        uint16_t rx_buf_size;

        HWRM_PREP(req, RING_ALLOC, BNXT_USE_CHIMP_MB);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);
        req.length = rte_cpu_to_le_32(ring->ring_size);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
                req.queue_id = rte_cpu_to_le_16(tx_cosq_id);
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
                if (BNXT_CHIP_THOR(bp)) {
                        mb_pool = bp->rx_queues[0]->mb_pool;
                        rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
                                      RTE_PKTMBUF_HEADROOM;
                        rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
                        req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
                        enables |=
                                HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
                }
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                                HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                if (BNXT_HAS_NQ(bp)) {
                        /* Association of cp ring with nq */
                        req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                        enables |=
                                HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
                }
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
                req.ring_type = ring_type;
                req.page_size = BNXT_PAGE_SHFT;
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
                req.ring_type = ring_type;
                req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);

                mb_pool = bp->rx_queues[0]->mb_pool;
                rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
                              RTE_PKTMBUF_HEADROOM;
                rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
                req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);

                req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
                enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
                           HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
                           HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        default:
                PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                HWRM_UNLOCK();
                return -EINVAL;
        }
        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                        PMD_DRV_LOG(ERR,
                                    "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
                        PMD_DRV_LOG(ERR,
                                    "hwrm_ring_alloc rx agg failed. rc:%d\n",
                                    rc);
1391                         HWRM_UNLOCK();
1392                         return rc;
1393                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1394                         PMD_DRV_LOG(ERR,
1395                                     "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1396                         HWRM_UNLOCK();
1397                         return rc;
1398                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1399                         PMD_DRV_LOG(ERR,
1400                                     "hwrm_ring_alloc nq failed. rc:%d\n", rc);
1401                         HWRM_UNLOCK();
1402                         return rc;
1403                 default:
1404                         PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1405                         HWRM_UNLOCK();
1406                         return rc;
1407                 }
1408         }
1409
1410         ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1411         HWRM_UNLOCK();
1412         return rc;
1413 }
1414
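/*
 * Return a ring to the firmware. Callers are expected to reset
 * ring->fw_ring_id to INVALID_HW_RING_ID afterwards, as the free
 * helpers further below do.
 */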
1415 int bnxt_hwrm_ring_free(struct bnxt *bp,
1416                         struct bnxt_ring *ring, uint32_t ring_type)
1417 {
1418         int rc;
1419         struct hwrm_ring_free_input req = {.req_type = 0 };
1420         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1421
1422         HWRM_PREP(req, RING_FREE, BNXT_USE_CHIMP_MB);
1423
1424         req.ring_type = ring_type;
1425         req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1426
1427         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1428
1429         if (rc || resp->error_code) {
1430                 if (rc == 0 && resp->error_code)
1431                         rc = rte_le_to_cpu_16(resp->error_code);
1432                 HWRM_UNLOCK();
1433
1434                 switch (ring_type) {
1435                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1436                         PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1437                                 rc);
1438                         return rc;
1439                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1440                         PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1441                                 rc);
1442                         return rc;
1443                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1444                         PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1445                                 rc);
1446                         return rc;
1447                 case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
1448                         PMD_DRV_LOG(ERR,
1449                                     "hwrm_ring_free nq failed. rc:%d\n", rc);
1450                         return rc;
1451                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
1452                         PMD_DRV_LOG(ERR,
1453                                     "hwrm_ring_free agg failed. rc:%d\n", rc);
1454                         return rc;
1455                 default:
1456                         PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1457                         return rc;
1458                 }
1459         }
1460         HWRM_UNLOCK();
1461         return 0;
1462 }
1463
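/*
 * A ring group bundles the completion ring, Rx ring, aggregation ring
 * and stats context of one Rx queue so that VNICs can reference a
 * single group id instead of four individual ids. Hypothetical setup
 * sketch for one queue (error handling omitted):
 *
 *	bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id;
 *	bp->grp_info[idx].rx_fw_ring_id = rx_ring->fw_ring_id;
 *	bp->grp_info[idx].ag_fw_ring_id = ag_ring->fw_ring_id;
 *	rc = bnxt_hwrm_ring_grp_alloc(bp, idx);
 */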
1464 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1465 {
1466         int rc = 0;
1467         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1468         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1469
1470         HWRM_PREP(req, RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
1471
1472         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1473         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1474         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1475         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1476
1477         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1478
1479         HWRM_CHECK_RESULT();
1480
1481         bp->grp_info[idx].fw_grp_id =
1482             rte_le_to_cpu_16(resp->ring_group_id);
1483
1484         HWRM_UNLOCK();
1485
1486         return rc;
1487 }
1488
1489 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1490 {
1491         int rc;
1492         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1493         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1494
1495         HWRM_PREP(req, RING_GRP_FREE, BNXT_USE_CHIMP_MB);
1496
1497         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1498
1499         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1500
1501         HWRM_CHECK_RESULT();
1502         HWRM_UNLOCK();
1503
1504         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1505         return rc;
1506 }
1507
1508 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1509 {
1510         int rc = 0;
1511         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1512         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1513
1514         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1515                 return rc;
1516
1517         HWRM_PREP(req, STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
1518
1519         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1520
1521         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1522
1523         HWRM_CHECK_RESULT();
1524         HWRM_UNLOCK();
1525
1526         return rc;
1527 }
1528
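/*
 * Allocate a statistics context for a completion ring. The firmware
 * DMAs the counters into the host buffer mapped at cpr->hw_stats_map;
 * update_period_ms is left at zero here.
 */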
1529 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1530                                 unsigned int idx __rte_unused)
1531 {
1532         int rc;
1533         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1534         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1535
1536         HWRM_PREP(req, STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1537
1538         req.update_period_ms = rte_cpu_to_le_32(0);
1539
1540         req.stats_dma_addr =
1541             rte_cpu_to_le_64(cpr->hw_stats_map);
1542
1543         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1544
1545         HWRM_CHECK_RESULT();
1546
1547         cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);
1548
1549         HWRM_UNLOCK();
1550
1551         return rc;
1552 }
1553
1554 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1555                                 unsigned int idx __rte_unused)
1556 {
1557         int rc;
1558         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1559         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1560
1561         HWRM_PREP(req, STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
1562
1563         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1564
1565         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1566
1567         HWRM_CHECK_RESULT();
1568         HWRM_UNLOCK();
1569
1570         return rc;
1571 }
1572
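/*
 * Allocate a VNIC and map ring groups [start_grp_id, end_grp_id) to
 * it. The MRU is derived from the current MTU; e.g. for a 1500-byte
 * MTU:
 *
 *	mru = 1500 + RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4)
 *	           + VLAN_TAG_SIZE (4) = 1522
 */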
1573 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1574 {
1575         int rc = 0, i, j;
1576         struct hwrm_vnic_alloc_input req = { 0 };
1577         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1578
1579         if (!BNXT_HAS_RING_GRPS(bp))
1580                 goto skip_ring_grps;
1581
1582         /* map ring groups to this vnic */
1583         PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1584                 vnic->start_grp_id, vnic->end_grp_id);
1585         for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
1586                 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1587
1588         vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1589         vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1590         vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1591         vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1592
1593 skip_ring_grps:
1594         vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
1595                                 RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
1596         HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);
1597
1598         if (vnic->func_default)
1599                 req.flags =
1600                         rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1601         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1602
1603         HWRM_CHECK_RESULT();
1604
1605         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1606         HWRM_UNLOCK();
1607         PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1608         return rc;
1609 }
1610
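/*
 * bnxt_hwrm_vnic_cfg() below brackets HWRM_VNIC_CFG with this
 * qcfg/cfg pair so that the VNIC placement modes (jumbo threshold and
 * HDS settings) are preserved across the reconfiguration.
 */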
1611 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1612                                         struct bnxt_vnic_info *vnic,
1613                                         struct bnxt_plcmodes_cfg *pmode)
1614 {
1615         int rc = 0;
1616         struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1617         struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1618
1619         HWRM_PREP(req, VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
1620
1621         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1622
1623         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1624
1625         HWRM_CHECK_RESULT();
1626
1627         pmode->flags = rte_le_to_cpu_32(resp->flags);
1628         /* dflt_vnic bit doesn't exist in the _cfg command */
1629         pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1630         pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1631         pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1632         pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1633
1634         HWRM_UNLOCK();
1635
1636         return rc;
1637 }
1638
1639 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1640                                        struct bnxt_vnic_info *vnic,
1641                                        struct bnxt_plcmodes_cfg *pmode)
1642 {
1643         int rc = 0;
1644         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1645         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1646
1647         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1648                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1649                 return rc;
1650         }
1651
1652         HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1653
1654         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1655         req.flags = rte_cpu_to_le_32(pmode->flags);
1656         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1657         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1658         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1659         req.enables = rte_cpu_to_le_32(
1660             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1661             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1662             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1663         );
1664
1665         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1666
1667         HWRM_CHECK_RESULT();
1668         HWRM_UNLOCK();
1669
1670         return rc;
1671 }
1672
1673 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1674 {
1675         int rc = 0;
1676         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1677         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1678         struct bnxt_plcmodes_cfg pmodes = { 0 };
1679         uint32_t ctx_enable_flag = 0;
1680         uint32_t enables = 0;
1681
1682         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1683                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1684                 return rc;
1685         }
1686
1687         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1688         if (rc)
1689                 return rc;
1690
1691         HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);
1692
1693         if (BNXT_CHIP_THOR(bp)) {
1694                 struct bnxt_rx_queue *rxq =
1695                         bp->eth_dev->data->rx_queues[vnic->start_grp_id];
1696                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1697                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1698
1699                 req.default_rx_ring_id =
1700                         rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
1701                 req.default_cmpl_ring_id =
1702                         rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
1703                 enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
1704                           HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
1705                 goto config_mru;
1706         }
1707
1708         /* Only RSS is supported for now; COS & LB are TBD. */
1709         enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
1710         if (vnic->lb_rule != 0xffff)
1711                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1712         if (vnic->cos_rule != 0xffff)
1713                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1714         if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
1715                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1716                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1717         }
1718         if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
1719                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID;
1720                 req.queue_id = rte_cpu_to_le_16(vnic->cos_queue_id);
1721         }
1722
1723         enables |= ctx_enable_flag;
1724         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1725         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1726         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1727         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1728
1729 config_mru:
1730         req.enables = rte_cpu_to_le_32(enables);
1731         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1732         req.mru = rte_cpu_to_le_16(vnic->mru);
1733         /* Configure default VNIC only once. */
1734         if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
1735                 req.flags |=
1736                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1737                 bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
1738         }
1739         if (vnic->vlan_strip)
1740                 req.flags |=
1741                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1742         if (vnic->bd_stall)
1743                 req.flags |=
1744                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1745         if (vnic->roce_dual)
1746                 req.flags |= rte_cpu_to_le_32(
1747                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1748         if (vnic->roce_only)
1749                 req.flags |= rte_cpu_to_le_32(
1750                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1751         if (vnic->rss_dflt_cr)
1752                 req.flags |= rte_cpu_to_le_32(
1753                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1754
1755         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1756
1757         HWRM_CHECK_RESULT();
1758         HWRM_UNLOCK();
1759
1760         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1761
1762         return rc;
1763 }
1764
1765 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1766                 int16_t fw_vf_id)
1767 {
1768         int rc = 0;
1769         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1770         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1771
1772         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1773                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1774                 return rc;
1775         }
1776         HWRM_PREP(req, VNIC_QCFG, BNXT_USE_CHIMP_MB);
1777
1778         req.enables =
1779                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1780         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1781         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1782
1783         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1784
1785         HWRM_CHECK_RESULT();
1786
1787         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1788         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1789         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1790         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1791         vnic->mru = rte_le_to_cpu_16(resp->mru);
1792         vnic->func_default = rte_le_to_cpu_32(
1793                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1794         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1795                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1796         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1797                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1798         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1799                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1800         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1801                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1802         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1803                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1804
1805         HWRM_UNLOCK();
1806
1807         return rc;
1808 }
1809
1810 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
1811                              struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
1812 {
1813         int rc = 0;
1814         uint16_t ctx_id;
1815         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1816         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1817                                                 bp->hwrm_cmd_resp_addr;
1818
1819         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1820
1821         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1822         HWRM_CHECK_RESULT();
1823
1824         ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1825         if (!BNXT_HAS_RING_GRPS(bp))
1826                 vnic->fw_grp_ids[ctx_idx] = ctx_id;
1827         else if (ctx_idx == 0)
1828                 vnic->rss_rule = ctx_id;
1829
1830         HWRM_UNLOCK();
1831
1832         return rc;
1833 }
1834
1835 static
1836 int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
1837                              struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
1838 {
1839         int rc = 0;
1840         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1841         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1842                                                 bp->hwrm_cmd_resp_addr;
1843
1844         if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
1845                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1846                 return rc;
1847         }
1848         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
1849
1850         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
1851
1852         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1853
1854         HWRM_CHECK_RESULT();
1855         HWRM_UNLOCK();
1856
1857         return rc;
1858 }
1859
1860 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1861 {
1862         int rc = 0;
1863
1864         if (BNXT_CHIP_THOR(bp)) {
1865                 int j;
1866
1867                 for (j = 0; j < vnic->num_lb_ctxts; j++) {
1868                         rc = _bnxt_hwrm_vnic_ctx_free(bp,
1869                                                       vnic,
1870                                                       vnic->fw_grp_ids[j]);
1871                         vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
1872                 }
1873                 vnic->num_lb_ctxts = 0;
1874         } else {
1875                 rc = _bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
1876                 vnic->rss_rule = INVALID_HW_RING_ID;
1877         }
1878
1879         return rc;
1880 }
1881
1882 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1883 {
1884         int rc = 0;
1885         struct hwrm_vnic_free_input req = {.req_type = 0 };
1886         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1887
1888         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1889                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1890                 return rc;
1891         }
1892
1893         HWRM_PREP(req, VNIC_FREE, BNXT_USE_CHIMP_MB);
1894
1895         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1896
1897         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1898
1899         HWRM_CHECK_RESULT();
1900         HWRM_UNLOCK();
1901
1902         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1903         /* Configure default VNIC again if necessary. */
1904         if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
1905                 bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;
1906
1907         return rc;
1908 }
1909
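/*
 * On Thor, RSS state is split across one context per ring table pair.
 * Context i covers HW_HASH_INDEX_SIZE entries of the ring group
 * table, i.e. the slice at rss_table_dma_addr + i * HW_HASH_INDEX_SIZE.
 */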
1910 static int
1911 bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1912 {
1913         int i;
1914         int rc = 0;
1915         int nr_ctxs = vnic->num_lb_ctxts;
1916         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1917         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1918
1919         for (i = 0; i < nr_ctxs; i++) {
1920                 HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
1921
1922                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1923                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1924                 req.hash_mode_flags = vnic->hash_mode;
1925
1926                 req.hash_key_tbl_addr =
1927                         rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1928
1929                 req.ring_grp_tbl_addr =
1930                         rte_cpu_to_le_64(vnic->rss_table_dma_addr +
1931                                          i * HW_HASH_INDEX_SIZE);
1932                 req.ring_table_pair_index = i;
1933                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
1934
1935                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
1936                                             BNXT_USE_CHIMP_MB);
1937
1938                 HWRM_CHECK_RESULT();
1939                 HWRM_UNLOCK();
1940         }
1941
1942         return rc;
1943 }
1944
1945 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1946                            struct bnxt_vnic_info *vnic)
1947 {
1948         int rc = 0;
1949         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1950         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1951
1952         if (!vnic->rss_table)
1953                 return 0;
1954
1955         if (BNXT_CHIP_THOR(bp))
1956                 return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);
1957
1958         HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
1959
1960         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1961         req.hash_mode_flags = vnic->hash_mode;
1962
1963         req.ring_grp_tbl_addr =
1964             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1965         req.hash_key_tbl_addr =
1966             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1967         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1968         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1969
1970         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1971
1972         HWRM_CHECK_RESULT();
1973         HWRM_UNLOCK();
1974
1975         return rc;
1976 }
1977
1978 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1979                         struct bnxt_vnic_info *vnic)
1980 {
1981         int rc = 0;
1982         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1983         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1984         uint16_t size;
1985
1986         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1987                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1988                 return rc;
1989         }
1990
1991         HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1992
1993         req.flags = rte_cpu_to_le_32(
1994                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1995
1996         req.enables = rte_cpu_to_le_32(
1997                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1998
1999         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
2000         size -= RTE_PKTMBUF_HEADROOM;
2001         size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
2002
2003         req.jumbo_thresh = rte_cpu_to_le_16(size);
2004         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2005
2006         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2007
2008         HWRM_CHECK_RESULT();
2009         HWRM_UNLOCK();
2010
2011         return rc;
2012 }
2013
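/*
 * Enable or disable TPA (hardware LRO) on a VNIC; on Thor this
 * additionally requires TPA v2 support (bp->max_tpa_v2). During
 * teardown it is called with enable == false, as in
 * bnxt_free_all_hwrm_resources() below:
 *
 *	bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
 */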
2014 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
2015                         struct bnxt_vnic_info *vnic, bool enable)
2016 {
2017         int rc = 0;
2018         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
2019         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2020
2021         if (BNXT_CHIP_THOR(bp) && !bp->max_tpa_v2) {
2022                 if (enable)
2023                         PMD_DRV_LOG(ERR, "No HW support for LRO\n");
2024                 return -ENOTSUP;
2025         }
2026
2027         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2028                 PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
2029                 return 0;
2030         }
2031
2032         HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
2033
2034         if (enable) {
2035                 req.enables = rte_cpu_to_le_32(
2036                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
2037                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
2038                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
2039                 req.flags = rte_cpu_to_le_32(
2040                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
2041                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
2042                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
2043                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
2044                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
2045                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
2046                 req.max_agg_segs = rte_cpu_to_le_16(BNXT_TPA_MAX_SEGS(bp));
2047                 req.max_aggs = rte_cpu_to_le_16(BNXT_TPA_MAX_AGGS(bp));
2048                 req.min_agg_len = rte_cpu_to_le_32(512);
2049         }
2050         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2051
2052         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2053
2054         HWRM_CHECK_RESULT();
2055         HWRM_UNLOCK();
2056
2057         return rc;
2058 }
2059
2060 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
2061 {
2062         struct hwrm_func_cfg_input req = {0};
2063         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2064         int rc;
2065
2066         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2067         req.enables = rte_cpu_to_le_32(
2068                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2069         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
2070         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2071
2072         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2073
2074         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2075         HWRM_CHECK_RESULT();
2076         HWRM_UNLOCK();
2077
2078         bp->pf.vf_info[vf].random_mac = false;
2079
2080         return rc;
2081 }
2082
2083 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
2084                                   uint64_t *dropped)
2085 {
2086         int rc = 0;
2087         struct hwrm_func_qstats_input req = {.req_type = 0};
2088         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2089
2090         HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2091
2092         req.fid = rte_cpu_to_le_16(fid);
2093
2094         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2095
2096         HWRM_CHECK_RESULT();
2097
2098         if (dropped)
2099                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
2100
2101         HWRM_UNLOCK();
2102
2103         return rc;
2104 }
2105
2106 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
2107                           struct rte_eth_stats *stats)
2108 {
2109         int rc = 0;
2110         struct hwrm_func_qstats_input req = {.req_type = 0};
2111         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2112
2113         HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2114
2115         req.fid = rte_cpu_to_le_16(fid);
2116
2117         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2118
2119         HWRM_CHECK_RESULT();
2120
2121         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2122         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2123         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2124         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2125         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2126         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2127
2128         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2129         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2130         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2131         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2132         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2133         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2134
2135         stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
2136         stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
2137         stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
2138
2139         HWRM_UNLOCK();
2140
2141         return rc;
2142 }
2143
2144 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
2145 {
2146         int rc = 0;
2147         struct hwrm_func_clr_stats_input req = {.req_type = 0};
2148         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2149
2150         HWRM_PREP(req, FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
2151
2152         req.fid = rte_cpu_to_le_16(fid);
2153
2154         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2155
2156         HWRM_CHECK_RESULT();
2157         HWRM_UNLOCK();
2158
2159         return rc;
2160 }
2161
2162 /*
2163  * HWRM utility functions
2164  */
2165
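/*
 * The walkers below share one indexing convention: completion rings
 * are visited Rx-first, so index i resolves as
 *
 *	cpr = (i < bp->rx_cp_nr_rings)
 *		? bp->rx_queues[i]->cp_ring
 *		: bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
 */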
2166 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
2167 {
2168         unsigned int i;
2169         int rc = 0;
2170
2171         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2172                 struct bnxt_tx_queue *txq;
2173                 struct bnxt_rx_queue *rxq;
2174                 struct bnxt_cp_ring_info *cpr;
2175
2176                 if (i >= bp->rx_cp_nr_rings) {
2177                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2178                         cpr = txq->cp_ring;
2179                 } else {
2180                         rxq = bp->rx_queues[i];
2181                         cpr = rxq->cp_ring;
2182                 }
2183
2184                 rc = bnxt_hwrm_stat_clear(bp, cpr);
2185                 if (rc)
2186                         return rc;
2187         }
2188         return 0;
2189 }
2190
2191 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
2192 {
2193         int rc;
2194         unsigned int i;
2195         struct bnxt_cp_ring_info *cpr;
2196
2197         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2199                 if (i >= bp->rx_cp_nr_rings) {
2200                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
2201                 } else {
2202                         cpr = bp->rx_queues[i]->cp_ring;
2203                         if (BNXT_HAS_RING_GRPS(bp))
2204                                 bp->grp_info[i].fw_stats_ctx = -1;
2205                 }
2206                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
2207                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
2208                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
2209                         if (rc)
2210                                 return rc;
2211                 }
2212         }
2213         return 0;
2214 }
2215
2216 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
2217 {
2218         unsigned int i;
2219         int rc = 0;
2220
2221         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2222                 struct bnxt_tx_queue *txq;
2223                 struct bnxt_rx_queue *rxq;
2224                 struct bnxt_cp_ring_info *cpr;
2225
2226                 if (i >= bp->rx_cp_nr_rings) {
2227                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2228                         cpr = txq->cp_ring;
2229                 } else {
2230                         rxq = bp->rx_queues[i];
2231                         cpr = rxq->cp_ring;
2232                 }
2233
2234                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
2235
2236                 if (rc)
2237                         return rc;
2238         }
2239         return rc;
2240 }
2241
2242 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
2243 {
2244         uint16_t idx;
2245         int rc = 0;
2246
2247         if (!BNXT_HAS_RING_GRPS(bp))
2248                 return 0;
2249
2250         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
2252                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
2253                         continue;
2254
2255                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
2256
2257                 if (rc)
2258                         return rc;
2259         }
2260         return rc;
2261 }
2262
2263 void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2264 {
2265         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2266
2267         bnxt_hwrm_ring_free(bp, cp_ring,
2268                             HWRM_RING_FREE_INPUT_RING_TYPE_NQ);
2269         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2270         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2271                                      sizeof(*cpr->cp_desc_ring));
2272         cpr->cp_raw_cons = 0;
2273         cpr->valid = 0;
2274 }
2275
2276 void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2277 {
2278         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2279
2280         bnxt_hwrm_ring_free(bp, cp_ring,
2281                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
2282         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2283         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2284                         sizeof(*cpr->cp_desc_ring));
2285         cpr->cp_raw_cons = 0;
2286         cpr->valid = 0;
2287 }
2288
2289 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
2290 {
2291         struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
2292         struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
2293         struct bnxt_ring *ring = rxr->rx_ring_struct;
2294         struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
2295
2296         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2297                 bnxt_hwrm_ring_free(bp, ring,
2298                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2299                 ring->fw_ring_id = INVALID_HW_RING_ID;
2300                 if (BNXT_HAS_RING_GRPS(bp))
2301                         bp->grp_info[queue_index].rx_fw_ring_id =
2302                                                         INVALID_HW_RING_ID;
2303                 memset(rxr->rx_desc_ring, 0,
2304                        rxr->rx_ring_struct->ring_size *
2305                        sizeof(*rxr->rx_desc_ring));
2306                 memset(rxr->rx_buf_ring, 0,
2307                        rxr->rx_ring_struct->ring_size *
2308                        sizeof(*rxr->rx_buf_ring));
2309                 rxr->rx_prod = 0;
2310         }
2311         ring = rxr->ag_ring_struct;
2312         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2313                 bnxt_hwrm_ring_free(bp, ring,
2314                                     BNXT_CHIP_THOR(bp) ?
2315                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
2316                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2317                 ring->fw_ring_id = INVALID_HW_RING_ID;
2318                 memset(rxr->ag_buf_ring, 0,
2319                        rxr->ag_ring_struct->ring_size *
2320                        sizeof(*rxr->ag_buf_ring));
2321                 rxr->ag_prod = 0;
2322                 if (BNXT_HAS_RING_GRPS(bp))
2323                         bp->grp_info[queue_index].ag_fw_ring_id =
2324                                                         INVALID_HW_RING_ID;
2325         }
2326         if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
2327                 bnxt_free_cp_ring(bp, cpr);
2328
2329         if (BNXT_HAS_RING_GRPS(bp))
2330                 bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
2331 }
2332
2333 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
2334 {
2335         unsigned int i;
2336
2337         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
2338                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
2339                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
2340                 struct bnxt_ring *ring = txr->tx_ring_struct;
2341                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
2342
2343                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2344                         bnxt_hwrm_ring_free(bp, ring,
2345                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
2346                         ring->fw_ring_id = INVALID_HW_RING_ID;
2347                         memset(txr->tx_desc_ring, 0,
2348                                         txr->tx_ring_struct->ring_size *
2349                                         sizeof(*txr->tx_desc_ring));
2350                         memset(txr->tx_buf_ring, 0,
2351                                         txr->tx_ring_struct->ring_size *
2352                                         sizeof(*txr->tx_buf_ring));
2353                         txr->tx_prod = 0;
2354                         txr->tx_cons = 0;
2355                 }
2356                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
2357                         bnxt_free_cp_ring(bp, cpr);
2358                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
2359                 }
2360         }
2361
2362         for (i = 0; i < bp->rx_cp_nr_rings; i++)
2363                 bnxt_free_hwrm_rx_ring(bp, i);
2364
2365         return 0;
2366 }
2367
2368 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
2369 {
2370         uint16_t i;
2371         int rc = 0;
2372
2373         if (!BNXT_HAS_RING_GRPS(bp))
2374                 return 0;
2375
2376         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
2377                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
2378                 if (rc)
2379                         return rc;
2380         }
2381         return rc;
2382 }
2383
2384 void bnxt_free_hwrm_resources(struct bnxt *bp)
2385 {
2386         /* Release memzone */
2387         rte_free(bp->hwrm_cmd_resp_addr);
2388         rte_free(bp->hwrm_short_cmd_req_addr);
2389         bp->hwrm_cmd_resp_addr = NULL;
2390         bp->hwrm_short_cmd_req_addr = NULL;
2391         bp->hwrm_cmd_resp_dma_addr = 0;
2392         bp->hwrm_short_cmd_req_dma_addr = 0;
2393 }
2394
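/*
 * The HWRM response buffer must be DMA-able, IOVA-contiguous memory:
 * it is carved from an rte_malloc heap and its IOVA is resolved with
 * rte_mem_virt2iova() before the channel spinlock is initialized.
 */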
2395 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2396 {
2397         struct rte_pci_device *pdev = bp->pdev;
2398         char type[RTE_MEMZONE_NAMESIZE];
2399
2400         snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
2401                  pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2402         bp->max_resp_len = HWRM_MAX_RESP_LEN;
2403         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
2404         if (bp->hwrm_cmd_resp_addr == NULL)
2405                 return -ENOMEM;
2406         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
2407         bp->hwrm_cmd_resp_dma_addr =
2408                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
2409         if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
2410                 PMD_DRV_LOG(ERR,
2411                         "unable to map response address to physical memory\n");
2412                 return -ENOMEM;
2413         }
2414         rte_spinlock_init(&bp->hwrm_lock);
2415
2416         return 0;
2417 }
2418
2419 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2420 {
2421         struct bnxt_filter_info *filter;
2422         int rc = 0;
2423
2424         STAILQ_FOREACH(filter, &vnic->filter, next) {
2425                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2426                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
2427                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2428                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2429                 else
2430                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2431                 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
2432                 bnxt_free_filter(bp, filter);
2433                 /* Continue clearing filters even if a free fails. */
2435         }
2436         return rc;
2437 }
2438
2439 static int
2440 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2441 {
2442         struct bnxt_filter_info *filter;
2443         struct rte_flow *flow;
2444         int rc = 0;
2445
2446         while (!STAILQ_EMPTY(&vnic->flow_list)) {
2447                 flow = STAILQ_FIRST(&vnic->flow_list);
2448                 filter = flow->filter;
2449                 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
2450                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2451                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
2452                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2453                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2454                 else
2455                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2456
2457                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2458                 rte_free(flow);
2459                 /* Continue clearing flows even if a free fails. */
2461         }
2462         return rc;
2463 }
2464
2465 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2466 {
2467         struct bnxt_filter_info *filter;
2468         int rc = 0;
2469
2470         STAILQ_FOREACH(filter, &vnic->filter, next) {
2471                 if (filter->filter_type == HWRM_CFA_EM_FILTER) {
2472                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2473                                                      filter);
2474                 } else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
2475                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2476                                                          filter);
2477                 } else {
2478                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2479                                                      filter);
2480                         if (!rc)
2481                                 filter->dflt = 1;
2482                 }
2483                 if (rc)
2484                         break;
2485         }
2486         return rc;
2487 }
2488
2489 void bnxt_free_tunnel_ports(struct bnxt *bp)
2490 {
2491         if (bp->vxlan_port_cnt)
2492                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2493                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2494         bp->vxlan_port = 0;
2495         if (bp->geneve_port_cnt)
2496                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2497                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2498         bp->geneve_port = 0;
2499 }
2500
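/*
 * Full teardown: for each active VNIC the flows, filters, RSS/COS/LB
 * contexts and TPA state are released before the VNIC itself, then
 * the rings, ring groups, stats contexts and tunnel ports follow.
 */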
2501 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2502 {
2503         int i;
2504
2505         if (bp->vnic_info == NULL)
2506                 return;
2507
2508         /*
2509          * Cleanup VNICs in reverse order, to make sure the L2 filter
2510          * from vnic0 is last to be cleaned up.
2511          */
2512         for (i = bp->max_vnics - 1; i >= 0; i--) {
2513                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2514
2515                 /* If the VNIC ID is invalid we are not currently using the VNIC. */
2516                 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
2517                         continue;
2518
2519                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2520
2521                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2522
2523                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2524
2525                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2526
2527                 bnxt_hwrm_vnic_free(bp, vnic);
2528
2529                 rte_free(vnic->fw_grp_ids);
2530         }
2531         /* Ring resources */
2532         bnxt_free_all_hwrm_rings(bp);
2533         bnxt_free_all_hwrm_ring_grps(bp);
2534         bnxt_free_all_hwrm_stat_ctxs(bp);
2535         bnxt_free_tunnel_ports(bp);
2536 }
2537
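/*
 * The helpers below translate between the rte_ethdev ETH_LINK_SPEED_*
 * bitmap and the HWRM encodings. As a hypothetical example, for a
 * fixed 25G configuration (ETH_LINK_SPEED_25G | ETH_LINK_SPEED_FIXED)
 * bnxt_check_eth_link_autoneg() returns 0 and
 * bnxt_parse_eth_link_speed() returns
 * HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB.
 */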
2538 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2539 {
2540         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2541
2542         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2543                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2544
2545         switch (conf_link_speed) {
2546         case ETH_LINK_SPEED_10M_HD:
2547         case ETH_LINK_SPEED_100M_HD:
2548                 /* FALLTHROUGH */
2549                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2550         }
2551         return hw_link_duplex;
2552 }
2553
2554 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2555 {
2556         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2557 }
2558
2559 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2560 {
2561         uint16_t eth_link_speed = 0;
2562
2563         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2564                 return ETH_LINK_SPEED_AUTONEG;
2565
2566         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2567         case ETH_LINK_SPEED_100M:
2568         case ETH_LINK_SPEED_100M_HD:
2569                 /* FALLTHROUGH */
2570                 eth_link_speed =
2571                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2572                 break;
2573         case ETH_LINK_SPEED_1G:
2574                 eth_link_speed =
2575                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2576                 break;
2577         case ETH_LINK_SPEED_2_5G:
2578                 eth_link_speed =
2579                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2580                 break;
2581         case ETH_LINK_SPEED_10G:
2582                 eth_link_speed =
2583                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2584                 break;
2585         case ETH_LINK_SPEED_20G:
2586                 eth_link_speed =
2587                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2588                 break;
2589         case ETH_LINK_SPEED_25G:
2590                 eth_link_speed =
2591                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2592                 break;
2593         case ETH_LINK_SPEED_40G:
2594                 eth_link_speed =
2595                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2596                 break;
2597         case ETH_LINK_SPEED_50G:
2598                 eth_link_speed =
2599                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2600                 break;
2601         case ETH_LINK_SPEED_100G:
2602                 eth_link_speed =
2603                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2604                 break;
2605         default:
2606                 PMD_DRV_LOG(ERR,
2607                         "Unsupported link speed %u; defaulting to AUTO\n",
2608                         conf_link_speed);
2609                 break;
2610         }
2611         return eth_link_speed;
2612 }
2613
2614 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2615                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2616                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2617                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2618
2619 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2620 {
2621         uint32_t one_speed;
2622
2623         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2624                 return 0;
2625
2626         if (link_speed & ETH_LINK_SPEED_FIXED) {
2627                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2628
2629                 if (one_speed & (one_speed - 1)) {
2630                         PMD_DRV_LOG(ERR,
2631                                 "Invalid advertised speeds (%u) for port %u\n",
2632                                 link_speed, port_id);
2633                         return -EINVAL;
2634                 }
2635                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2636                         PMD_DRV_LOG(ERR,
2637                                 "Unsupported advertised speed (%u) for port %u\n",
2638                                 link_speed, port_id);
2639                         return -EINVAL;
2640                 }
2641         } else {
2642                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2643                         PMD_DRV_LOG(ERR,
2644                                 "Unsupported advertised speeds (%u) for port %u\n",
2645                                 link_speed, port_id);
2646                         return -EINVAL;
2647                 }
2648         }
2649         return 0;
2650 }
2651
2652 static uint16_t
2653 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2654 {
2655         uint16_t ret = 0;
2656
2657         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2658                 if (bp->link_info.support_speeds)
2659                         return bp->link_info.support_speeds;
2660                 link_speed = BNXT_SUPPORTED_SPEEDS;
2661         }
2662
2663         if (link_speed & ETH_LINK_SPEED_100M)
2664                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2665         if (link_speed & ETH_LINK_SPEED_100M_HD)
2666                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2667         if (link_speed & ETH_LINK_SPEED_1G)
2668                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2669         if (link_speed & ETH_LINK_SPEED_2_5G)
2670                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2671         if (link_speed & ETH_LINK_SPEED_10G)
2672                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2673         if (link_speed & ETH_LINK_SPEED_20G)
2674                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2675         if (link_speed & ETH_LINK_SPEED_25G)
2676                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2677         if (link_speed & ETH_LINK_SPEED_40G)
2678                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2679         if (link_speed & ETH_LINK_SPEED_50G)
2680                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2681         if (link_speed & ETH_LINK_SPEED_100G)
2682                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2683         return ret;
2684 }
2685
2686 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2687 {
2688         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2689
2690         switch (hw_link_speed) {
2691         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2692                 eth_link_speed = ETH_SPEED_NUM_100M;
2693                 break;
2694         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2695                 eth_link_speed = ETH_SPEED_NUM_1G;
2696                 break;
2697         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2698                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2699                 break;
2700         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2701                 eth_link_speed = ETH_SPEED_NUM_10G;
2702                 break;
2703         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2704                 eth_link_speed = ETH_SPEED_NUM_20G;
2705                 break;
2706         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2707                 eth_link_speed = ETH_SPEED_NUM_25G;
2708                 break;
2709         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2710                 eth_link_speed = ETH_SPEED_NUM_40G;
2711                 break;
2712         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2713                 eth_link_speed = ETH_SPEED_NUM_50G;
2714                 break;
2715         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2716                 eth_link_speed = ETH_SPEED_NUM_100G;
2717                 break;
2718         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2719         default:
2720                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2721                         hw_link_speed);
2722                 break;
2723         }
2724         return eth_link_speed;
2725 }
2726
2727 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2728 {
2729         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2730
2731         switch (hw_link_duplex) {
2732         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2733         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2734                 /* FALLTHROUGH */
2735                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2736                 break;
2737         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2738                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2739                 break;
2740         default:
2741                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2742                         hw_link_duplex);
2743                 break;
2744         }
2745         return eth_link_duplex;
2746 }
2747
2748 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2749 {
2750         int rc = 0;
2751         struct bnxt_link_info *link_info = &bp->link_info;
2752
2753         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2754         if (rc) {
2755                 PMD_DRV_LOG(ERR,
2756                         "Get link config failed with rc %d\n", rc);
2757                 goto exit;
2758         }
2759         if (link_info->link_speed)
2760                 link->link_speed =
2761                         bnxt_parse_hw_link_speed(link_info->link_speed);
2762         else
2763                 link->link_speed = ETH_SPEED_NUM_NONE;
2764         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2765         link->link_status = link_info->link_up;
2766         link->link_autoneg = link_info->auto_mode ==
2767                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2768                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2769 exit:
2770         return rc;
2771 }
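
/*
 * Hedged usage sketch: a link_update dev op would refresh the ethdev
 * link snapshot through this helper (the surrounding names follow the
 * usual ethdev pattern and are assumptions, not code from this file):
 *
 *     struct rte_eth_link new_link;
 *
 *     memset(&new_link, 0, sizeof(new_link));
 *     if (bnxt_get_hwrm_link_config(bp, &new_link) == 0)
 *             rte_eth_linkstatus_set(eth_dev, &new_link);
 */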
2772
2773 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2774 {
2775         int rc = 0;
2776         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2777         struct bnxt_link_info link_req;
2778         uint16_t speed, autoneg;
2779
2780         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2781                 return 0;
2782
2783         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2784                         bp->eth_dev->data->port_id);
2785         if (rc)
2786                 goto error;
2787
2788         memset(&link_req, 0, sizeof(link_req));
2789         link_req.link_up = link_up;
2790         if (!link_up)
2791                 goto port_phy_cfg;
2792
2793         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2794         if (BNXT_CHIP_THOR(bp) &&
2795             dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
2796                 /* 40G is not supported as part of media auto detect.
2797                  * The speed should be forced and autoneg disabled
2798                  * to configure 40G speed.
2799                  */
2800                 PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n");
2801                 autoneg = 0;
2802         }
2803
2804         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2805         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2806         /* Autoneg can be done only when the FW allows.
2807          * When user configures fixed speed of 40G and later changes to
2808          * any other speed, auto_link_speed/force_link_speed is still set
2809          * to 40G until link comes up at new speed.
2810          */
2811         if (autoneg == 1 &&
2812             !(!BNXT_CHIP_THOR(bp) &&
2813               (bp->link_info.auto_link_speed ||
2814                bp->link_info.force_link_speed))) {
2815                 link_req.phy_flags |=
2816                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2817                 link_req.auto_link_speed_mask =
2818                         bnxt_parse_eth_link_speed_mask(bp,
2819                                                        dev_conf->link_speeds);
2820         } else {
2821                 if (bp->link_info.phy_type ==
2822                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2823                     bp->link_info.phy_type ==
2824                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2825                     bp->link_info.media_type ==
2826                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2827                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2828                         return -EINVAL;
2829                 }
2830
2831                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2832                 /* If user wants a particular speed try that first. */
2833                 if (speed)
2834                         link_req.link_speed = speed;
2835                 else if (bp->link_info.force_link_speed)
2836                         link_req.link_speed = bp->link_info.force_link_speed;
2837                 else
2838                         link_req.link_speed = bp->link_info.auto_link_speed;
2839         }
2840         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2841         link_req.auto_pause = bp->link_info.auto_pause;
2842         link_req.force_pause = bp->link_info.force_pause;
2843
2844 port_phy_cfg:
2845         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2846         if (rc) {
2847                 PMD_DRV_LOG(ERR,
2848                         "Set link config failed with rc %d\n", rc);
2849         }
2850
2851 error:
2852         return rc;
2853 }
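
/*
 * Worked example (sketch): on a Thor adapter with
 *
 *     dev_conf->link_speeds = ETH_LINK_SPEED_40G;
 *     bnxt_set_hwrm_link_config(bp, true);
 *
 * autoneg is cleared above, so the request takes the forced path:
 * phy_flags carries FORCE, link_speed is the parsed 40G value, and no
 * auto_link_speed_mask is programmed.
 */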
2854
2855 /* JIRA 22088 */
2856 int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
2857 {
2858         struct hwrm_func_qcfg_input req = {0};
2859         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2860         uint16_t flags;
2861         int rc = 0;
2862
2863         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2864         req.fid = rte_cpu_to_le_16(0xffff);
2865
2866         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2867
2868         HWRM_CHECK_RESULT();
2869
2870         /* Hard-coded 0xfff VLAN ID mask */
2871         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2872         flags = rte_le_to_cpu_16(resp->flags);
2873         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2874                 bp->flags |= BNXT_FLAG_MULTI_HOST;
2875
2876         if (BNXT_VF(bp) &&
2877             !BNXT_VF_IS_TRUSTED(bp) &&
2878             (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
2879                 bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
2880                 PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
2881         } else if (BNXT_VF(bp) &&
2882                    BNXT_VF_IS_TRUSTED(bp) &&
2883                    !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
2884                 bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
2885                 PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
2886         }
2887
2888         if (mtu)
2889                 *mtu = resp->mtu;
2890
2891         switch (resp->port_partition_type) {
2892         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2893         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2894         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2895                 /* FALLTHROUGH */
2896                 bp->port_partition_type = resp->port_partition_type;
2897                 break;
2898         default:
2899                 bp->port_partition_type = 0;
2900                 break;
2901         }
2902
2903         HWRM_UNLOCK();
2904
2905         return rc;
2906 }
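
/*
 * Usage sketch: callers that need the firmware-reported MTU pass a
 * pointer, others pass NULL.  Note the trusted-VF flag above is only
 * toggled (and logged) when the firmware report differs from the
 * cached state, so periodic re-queries stay quiet:
 *
 *     uint16_t mtu;
 *
 *     rc = bnxt_hwrm_func_qcfg(bp, &mtu);    (mtu filled on success)
 *     rc = bnxt_hwrm_func_qcfg(bp, NULL);    (flags refresh only)
 */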
2907
2908 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2909                                    struct hwrm_func_qcaps_output *qcaps)
2910 {
2911         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2912         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2913                sizeof(qcaps->mac_address));
2914         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2915         qcaps->max_rx_rings = fcfg->num_rx_rings;
2916         qcaps->max_tx_rings = fcfg->num_tx_rings;
2917         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2918         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2919         qcaps->max_vfs = 0;
2920         qcaps->first_vf_id = 0;
2921         qcaps->max_vnics = fcfg->num_vnics;
2922         qcaps->max_decap_records = 0;
2923         qcaps->max_encap_records = 0;
2924         qcaps->max_tx_wm_flows = 0;
2925         qcaps->max_tx_em_flows = 0;
2926         qcaps->max_rx_wm_flows = 0;
2927         qcaps->max_rx_em_flows = 0;
2928         qcaps->max_flow_id = 0;
2929         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2930         qcaps->max_sp_tx_rings = 0;
2931         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2932 }
2933
2934 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2935 {
2936         struct hwrm_func_cfg_input req = {0};
2937         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2938         uint32_t enables;
2939         int rc;
2940
2941         enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2942                   HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2943                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2944                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2945                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2946                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2947                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2948                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2949                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;
2950
2951         if (BNXT_HAS_RING_GRPS(bp)) {
2952                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
2953                 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2954         } else if (BNXT_HAS_NQ(bp)) {
2955                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
2956                 req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
2957         }
2958
2959         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2960         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2961         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
2962                                    RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
2963                                    BNXT_NUM_VLANS);
2964         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2965         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2966         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2967         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2968         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2969         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2970         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2971         req.fid = rte_cpu_to_le_16(0xffff);
2972         req.enables = rte_cpu_to_le_32(enables);
2973
2974         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2975
2976         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2977
2978         HWRM_CHECK_RESULT();
2979         HWRM_UNLOCK();
2980
2981         return rc;
2982 }
2983
2984 static void populate_vf_func_cfg_req(struct bnxt *bp,
2985                                      struct hwrm_func_cfg_input *req,
2986                                      int num_vfs)
2987 {
2988         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2989                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2990                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2991                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2992                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2993                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2994                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2995                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2996                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2997                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2998
2999         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
3000                                     RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
3001                                     BNXT_NUM_VLANS);
3002         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
3003                                     RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
3004                                     BNXT_NUM_VLANS);
3005         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
3006                                                 (num_vfs + 1));
3007         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
3008         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
3009                                                (num_vfs + 1));
3010         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
3011         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
3012         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
3013         /* TODO: For now, do not support VMDq/RFS on VFs. */
3014         req->num_vnics = rte_cpu_to_le_16(1);
3015         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
3016                                                  (num_vfs + 1));
3017 }
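
/*
 * Worked example of the split above (hypothetical numbers): with
 * bp->max_tx_rings == 128 and num_vfs == 7, each of the eight functions
 * (seven VFs plus the PF) is offered 128 / (7 + 1) == 16 TX rings; the
 * same divisor applies to completion rings, stat contexts, L2 contexts
 * and ring groups, while num_vnics stays pinned at 1 per the TODO above.
 */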
3018
3019 static void add_random_mac_if_needed(struct bnxt *bp,
3020                                      struct hwrm_func_cfg_input *cfg_req,
3021                                      int vf)
3022 {
3023         struct rte_ether_addr mac;
3024
3025         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
3026                 return;
3027
3028         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
3029                 cfg_req->enables |=
3030                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3031                 rte_eth_random_addr(cfg_req->dflt_mac_addr);
3032                 bp->pf.vf_info[vf].random_mac = true;
3033         } else {
3034                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,
3035                         RTE_ETHER_ADDR_LEN);
3036         }
3037 }
3038
3039 static void reserve_resources_from_vf(struct bnxt *bp,
3040                                       struct hwrm_func_cfg_input *cfg_req,
3041                                       int vf)
3042 {
3043         struct hwrm_func_qcaps_input req = {0};
3044         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3045         int rc;
3046
3047         /* Get the actual allocated values now */
3048         HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
3049         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3050         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3051
3052         if (rc) {
3053                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
3054                 copy_func_cfg_to_qcaps(cfg_req, resp);
3055         } else if (resp->error_code) {
3056                 rc = rte_le_to_cpu_16(resp->error_code);
3057                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
3058                 copy_func_cfg_to_qcaps(cfg_req, resp);
3059         }
3060
3061         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
3062         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
3063         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
3064         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
3065         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
3066         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
3067         /*
3068          * TODO: While not supporting VMDq with VFs, max_vnics is always
3069          * forced to 1 in this case
3070          */
3071         //bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
3072         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
3073
3074         HWRM_UNLOCK();
3075 }
3076
3077 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
3078 {
3079         struct hwrm_func_qcfg_input req = {0};
3080         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3081         int rc;
3082
3083         /* Query the VLAN currently configured for this VF */
3084         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
3085         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3086         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3087         HWRM_CHECK_RESULT();
3088         rc = rte_le_to_cpu_16(resp->vlan);
3089
3090         HWRM_UNLOCK();
3091
3092         return rc;
3093 }
3094
3095 static int update_pf_resource_max(struct bnxt *bp)
3096 {
3097         struct hwrm_func_qcfg_input req = {0};
3098         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3099         int rc;
3100
3101         /* And copy the allocated numbers into the pf struct */
3102         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
3103         req.fid = rte_cpu_to_le_16(0xffff);
3104         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3105         HWRM_CHECK_RESULT();
3106
3107         /* Only TX ring value reflects actual allocation? TODO */
3108         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
3109         bp->pf.evb_mode = resp->evb_mode;
3110
3111         HWRM_UNLOCK();
3112
3113         return rc;
3114 }
3115
3116 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
3117 {
3118         int rc;
3119
3120         if (!BNXT_PF(bp)) {
3121                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3122                 return -EINVAL;
3123         }
3124
3125         rc = bnxt_hwrm_func_qcaps(bp);
3126         if (rc)
3127                 return rc;
3128
3129         bp->pf.func_cfg_flags &=
3130                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3131                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3132         bp->pf.func_cfg_flags |=
3133                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
3134         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
3135         rc = __bnxt_hwrm_func_qcaps(bp);
3136         return rc;
3137 }
3138
3139 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
3140 {
3141         struct hwrm_func_cfg_input req = {0};
3142         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3143         int i;
3144         size_t sz;
3145         int rc = 0;
3146         size_t req_buf_sz;
3147
3148         if (!BNXT_PF(bp)) {
3149                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3150                 return -EINVAL;
3151         }
3152
3153         rc = bnxt_hwrm_func_qcaps(bp);
3154
3155         if (rc)
3156                 return rc;
3157
3158         bp->pf.active_vfs = num_vfs;
3159
3160         /*
3161          * First, configure the PF to only use one TX ring.  This ensures that
3162          * there are enough rings for all VFs.
3163          *
3164          * If we don't do this, when we call func_alloc() later, we will lock
3165          * extra rings to the PF that won't be available during func_cfg() of
3166          * the VFs.
3167          *
3168          * This has been fixed with firmware versions above 20.6.54.
3169          */
3170         bp->pf.func_cfg_flags &=
3171                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3172                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3173         bp->pf.func_cfg_flags |=
3174                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
3175         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
3176         if (rc)
3177                 return rc;
3178
3179         /*
3180          * Now, create and register a buffer to hold forwarded VF requests
3181          */
3182         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
3183         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
3184                 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
3185         if (bp->pf.vf_req_buf == NULL) {
3186                 rc = -ENOMEM;
3187                 goto error_free;
3188         }
3189         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
3190                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
3191         for (i = 0; i < num_vfs; i++)
3192                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
3193                                         (i * HWRM_MAX_REQ_LEN);
3194
3195         rc = bnxt_hwrm_func_buf_rgtr(bp);
3196         if (rc)
3197                 goto error_free;
3198
3199         populate_vf_func_cfg_req(bp, &req, num_vfs);
3200
3201         bp->pf.active_vfs = 0;
3202         for (i = 0; i < num_vfs; i++) {
3203                 add_random_mac_if_needed(bp, &req, i);
3204
3205                 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3206                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
3207                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
3208                 rc = bnxt_hwrm_send_message(bp,
3209                                             &req,
3210                                             sizeof(req),
3211                                             BNXT_USE_CHIMP_MB);
3212
3213                 /* Clear enable flag for next pass */
3214                 req.enables &= ~rte_cpu_to_le_32(
3215                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3216
3217                 if (rc || resp->error_code) {
3218                         PMD_DRV_LOG(ERR,
3219                                 "Failed to initialize VF %d\n", i);
3220                         PMD_DRV_LOG(ERR,
3221                                 "Not all VFs available. (%d, %d)\n",
3222                                 rc, resp->error_code);
3223                         HWRM_UNLOCK();
3224                         break;
3225                 }
3226
3227                 HWRM_UNLOCK();
3228
3229                 reserve_resources_from_vf(bp, &req, i);
3230                 bp->pf.active_vfs++;
3231                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
3232         }
3233
3234         /*
3235          * Now configure the PF to use "the rest" of the resources
3236          * We're using STD_TX_RING_MODE here though which will limit the TX
3237          * rings.  This will allow QoS to function properly.  Not setting this
3238          * will cause PF rings to break bandwidth settings.
3239          */
3240         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
3241         if (rc)
3242                 goto error_free;
3243
3244         rc = update_pf_resource_max(bp);
3245         if (rc)
3246                 goto error_free;
3247
3248         return rc;
3249
3250 error_free:
3251         bnxt_hwrm_func_buf_unrgtr(bp);
3252         return rc;
3253 }
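
/*
 * Hedged call sequence: a PF bringing up SR-IOV would invoke this once
 * from its device setup path, e.g.
 *
 *     rc = bnxt_hwrm_allocate_vfs(bp, 4);
 *
 * to carve resources for four VFs; any failure after the forwarded-
 * request buffer is registered unwinds through error_free above.
 */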
3254
3255 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
3256 {
3257         struct hwrm_func_cfg_input req = {0};
3258         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3259         int rc;
3260
3261         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3262
3263         req.fid = rte_cpu_to_le_16(0xffff);
3264         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
3265         req.evb_mode = bp->pf.evb_mode;
3266
3267         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3268         HWRM_CHECK_RESULT();
3269         HWRM_UNLOCK();
3270
3271         return rc;
3272 }
3273
3274 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
3275                                 uint8_t tunnel_type)
3276 {
3277         struct hwrm_tunnel_dst_port_alloc_input req = {0};
3278         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3279         int rc = 0;
3280
3281         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
3282         req.tunnel_type = tunnel_type;
3283         req.tunnel_dst_port_val = port;
3284         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3285         HWRM_CHECK_RESULT();
3286
3287         switch (tunnel_type) {
3288         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
3289                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
3290                 bp->vxlan_port = port;
3291                 break;
3292         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
3293                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
3294                 bp->geneve_port = port;
3295                 break;
3296         default:
3297                 break;
3298         }
3299
3300         HWRM_UNLOCK();
3301
3302         return rc;
3303 }
3304
3305 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
3306                                 uint8_t tunnel_type)
3307 {
3308         struct hwrm_tunnel_dst_port_free_input req = {0};
3309         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
3310         int rc = 0;
3311
3312         HWRM_PREP(req, TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
3313
3314         req.tunnel_type = tunnel_type;
3315         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
3316         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3317
3318         HWRM_CHECK_RESULT();
3319         HWRM_UNLOCK();
3320
3321         return rc;
3322 }
3323
3324 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
3325                                         uint32_t flags)
3326 {
3327         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3328         struct hwrm_func_cfg_input req = {0};
3329         int rc;
3330
3331         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3332
3333         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3334         req.flags = rte_cpu_to_le_32(flags);
3335         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3336
3337         HWRM_CHECK_RESULT();
3338         HWRM_UNLOCK();
3339
3340         return rc;
3341 }
3342
3343 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
3344 {
3345         uint32_t *flag = flagp;
3346
3347         vnic->flags = *flag;
3348 }
3349
3350 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3351 {
3352         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
3353 }
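
/*
 * Illustrative pairing (sketch, assuming the BNXT_VNIC_INFO_* flags
 * from the PMD headers): make every VNIC of VF 0 accept broadcasts by
 * combining the callback above with the query-and-config helper defined
 * later in this file:
 *
 *     uint32_t flag = BNXT_VNIC_INFO_BCAST;
 *
 *     bnxt_hwrm_func_vf_vnic_query_and_config(bp, 0,
 *             vf_vnic_set_rxmask_cb, &flag, bnxt_set_rx_mask_no_vlan);
 */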
3354
3355 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
3356 {
3357         int rc = 0;
3358         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
3359         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
3360
3361         HWRM_PREP(req, FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
3362
3363         req.req_buf_num_pages = rte_cpu_to_le_16(1);
3364         req.req_buf_page_size = rte_cpu_to_le_16(
3365                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
3366         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
3367         req.req_buf_page_addr0 =
3368                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
3369         if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
3370                 PMD_DRV_LOG(ERR,
3371                         "unable to map buffer address to physical memory\n");
3372                 return -ENOMEM;
3373         }
3374
3375         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3376
3377         HWRM_CHECK_RESULT();
3378         HWRM_UNLOCK();
3379
3380         return rc;
3381 }
3382
3383 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
3384 {
3385         int rc = 0;
3386         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
3387         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
3388
3389         if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
3390                 return 0;
3391
3392         HWRM_PREP(req, FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
3393
3394         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3395
3396         HWRM_CHECK_RESULT();
3397         HWRM_UNLOCK();
3398
3399         return rc;
3400 }
3401
3402 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
3403 {
3404         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3405         struct hwrm_func_cfg_input req = {0};
3406         int rc;
3407
3408         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3409
3410         req.fid = rte_cpu_to_le_16(0xffff);
3411         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
3412         req.enables = rte_cpu_to_le_32(
3413                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3414         req.async_event_cr = rte_cpu_to_le_16(
3415                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3416         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3417
3418         HWRM_CHECK_RESULT();
3419         HWRM_UNLOCK();
3420
3421         return rc;
3422 }
3423
3424 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
3425 {
3426         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3427         struct hwrm_func_vf_cfg_input req = {0};
3428         int rc;
3429
3430         HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
3431
3432         req.enables = rte_cpu_to_le_32(
3433                         HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3434         req.async_event_cr = rte_cpu_to_le_16(
3435                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3436         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3437
3438         HWRM_CHECK_RESULT();
3439         HWRM_UNLOCK();
3440
3441         return rc;
3442 }
3443
3444 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
3445 {
3446         struct hwrm_func_cfg_input req = {0};
3447         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3448         uint16_t dflt_vlan, fid;
3449         uint32_t func_cfg_flags;
3450         int rc = 0;
3451
3452         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3453
3454         if (is_vf) {
3455                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
3456                 fid = bp->pf.vf_info[vf].fid;
3457                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
3458         } else {
3459                 fid = rte_cpu_to_le_16(0xffff);
3460                 func_cfg_flags = bp->pf.func_cfg_flags;
3461                 dflt_vlan = bp->vlan;
3462         }
3463
3464         req.flags = rte_cpu_to_le_32(func_cfg_flags);
3465         req.fid = rte_cpu_to_le_16(fid);
3466         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3467         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
3468
3469         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3470
3471         HWRM_CHECK_RESULT();
3472         HWRM_UNLOCK();
3473
3474         return rc;
3475 }
3476
3477 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
3478                         uint16_t max_bw, uint16_t enables)
3479 {
3480         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3481         struct hwrm_func_cfg_input req = {0};
3482         int rc;
3483
3484         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3485
3486         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3487         req.enables |= rte_cpu_to_le_32(enables);
3488         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3489         req.max_bw = rte_cpu_to_le_32(max_bw);
3490         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3491
3492         HWRM_CHECK_RESULT();
3493         HWRM_UNLOCK();
3494
3495         return rc;
3496 }
3497
3498 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
3499 {
3500         struct hwrm_func_cfg_input req = {0};
3501         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3502         int rc = 0;
3503
3504         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3505
3506         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3507         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3508         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3509         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
3510
3511         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3512
3513         HWRM_CHECK_RESULT();
3514         HWRM_UNLOCK();
3515
3516         return rc;
3517 }
3518
3519 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3520 {
3521         int rc;
3522
3523         if (BNXT_PF(bp))
3524                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3525         else
3526                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3527
3528         return rc;
3529 }
3530
3531 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3532                               void *encaped, size_t ec_size)
3533 {
3534         int rc = 0;
3535         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3536         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3537
3538         if (ec_size > sizeof(req.encap_request))
3539                 return -1;
3540
3541         HWRM_PREP(req, REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
3542
3543         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3544         memcpy(req.encap_request, encaped, ec_size);
3545
3546         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3547
3548         HWRM_CHECK_RESULT();
3549         HWRM_UNLOCK();
3550
3551         return rc;
3552 }
3553
3554 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3555                                        struct rte_ether_addr *mac)
3556 {
3557         struct hwrm_func_qcfg_input req = {0};
3558         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3559         int rc;
3560
3561         HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
3562
3563         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3564         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3565
3566         HWRM_CHECK_RESULT();
3567
3568         memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);
3569
3570         HWRM_UNLOCK();
3571
3572         return rc;
3573 }
3574
3575 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3576                             void *encaped, size_t ec_size)
3577 {
3578         int rc = 0;
3579         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3580         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3581
3582         if (ec_size > sizeof(req.encap_request))
3583                 return -1;
3584
3585         HWRM_PREP(req, EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
3586
3587         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3588         memcpy(req.encap_request, encaped, ec_size);
3589
3590         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3591
3592         HWRM_CHECK_RESULT();
3593         HWRM_UNLOCK();
3594
3595         return rc;
3596 }
3597
3598 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3599                          struct rte_eth_stats *stats, uint8_t rx)
3600 {
3601         int rc = 0;
3602         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3603         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3604
3605         HWRM_PREP(req, STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
3606
3607         req.stat_ctx_id = rte_cpu_to_le_32(cid);
3608
3609         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3610
3611         HWRM_CHECK_RESULT();
3612
3613         if (rx) {
3614                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3615                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3616                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3617                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3618                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3619                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3620                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3621                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3622         } else {
3623                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3624                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3625                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3626                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3627                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3628                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3629         }
3630
3632         HWRM_UNLOCK();
3633
3634         return rc;
3635 }
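
/*
 * Usage sketch modelled on a stats_get handler (the ring/queue field
 * names are assumptions from the ring headers, not defined here):
 * aggregate each RX ring's stat context into the per-queue slots, then
 * repeat for TX rings with rx == 0:
 *
 *     struct bnxt_rx_queue *rxq = bp->rx_queues[i];
 *     struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
 *
 *     rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, stats, 1);
 */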
3636
3637 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3638 {
3639         struct hwrm_port_qstats_input req = {0};
3640         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3641         struct bnxt_pf_info *pf = &bp->pf;
3642         int rc;
3643
3644         HWRM_PREP(req, PORT_QSTATS, BNXT_USE_CHIMP_MB);
3645
3646         req.port_id = rte_cpu_to_le_16(pf->port_id);
3647         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3648         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3649         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3650
3651         HWRM_CHECK_RESULT();
3652         HWRM_UNLOCK();
3653
3654         return rc;
3655 }
3656
3657 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3658 {
3659         struct hwrm_port_clr_stats_input req = {0};
3660         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3661         struct bnxt_pf_info *pf = &bp->pf;
3662         int rc;
3663
3664         /* Not allowed on NS2 device, NPAR, MultiHost, VF */
3665         if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
3666             BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
3667                 return 0;
3668
3669         HWRM_PREP(req, PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
3670
3671         req.port_id = rte_cpu_to_le_16(pf->port_id);
3672         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3673
3674         HWRM_CHECK_RESULT();
3675         HWRM_UNLOCK();
3676
3677         return rc;
3678 }
3679
3680 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3681 {
3682         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3683         struct hwrm_port_led_qcaps_input req = {0};
3684         int rc;
3685
3686         if (BNXT_VF(bp))
3687                 return 0;
3688
3689         HWRM_PREP(req, PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
3690         req.port_id = bp->pf.port_id;
3691         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3692
3693         HWRM_CHECK_RESULT();
3694
3695         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3696                 unsigned int i;
3697
3698                 bp->num_leds = resp->num_leds;
3699                 memcpy(bp->leds, &resp->led0_id,
3700                         sizeof(bp->leds[0]) * bp->num_leds);
3701                 for (i = 0; i < bp->num_leds; i++) {
3702                         struct bnxt_led_info *led = &bp->leds[i];
3703
3704                         uint16_t caps = led->led_state_caps;
3705
3706                         if (!led->led_group_id ||
3707                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3708                                 bp->num_leds = 0;
3709                                 break;
3710                         }
3711                 }
3712         }
3713
3714         HWRM_UNLOCK();
3715
3716         return rc;
3717 }
3718
3719 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3720 {
3721         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3722         struct hwrm_port_led_cfg_input req = {0};
3723         struct bnxt_led_cfg *led_cfg;
3724         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3725         uint16_t duration = 0;
3726         int rc, i;
3727
3728         if (!bp->num_leds || BNXT_VF(bp))
3729                 return -EOPNOTSUPP;
3730
3731         HWRM_PREP(req, PORT_LED_CFG, BNXT_USE_CHIMP_MB);
3732
3733         if (led_on) {
3734                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3735                 duration = rte_cpu_to_le_16(500);
3736         }
3737         req.port_id = bp->pf.port_id;
3738         req.num_leds = bp->num_leds;
3739         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3740         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3741                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3742                 led_cfg->led_id = bp->leds[i].led_id;
3743                 led_cfg->led_state = led_state;
3744                 led_cfg->led_blink_on = duration;
3745                 led_cfg->led_blink_off = duration;
3746                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3747         }
3748
3749         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3750
3751         HWRM_CHECK_RESULT();
3752         HWRM_UNLOCK();
3753
3754         return rc;
3755 }
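
/*
 * Sketch of the expected pairing: dev_led_on/dev_led_off ops map
 * directly onto this helper, blinking every port LED at the 500 ms
 * cadence programmed above:
 *
 *     bnxt_hwrm_port_led_cfg(bp, true);     (start identify blink)
 *     bnxt_hwrm_port_led_cfg(bp, false);    (restore default state)
 */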
3756
3757 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3758                                uint32_t *length)
3759 {
3760         int rc;
3761         struct hwrm_nvm_get_dir_info_input req = {0};
3762         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3763
3764         HWRM_PREP(req, NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
3765
3766         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3767
3768         HWRM_CHECK_RESULT();
3769
3770         *entries = rte_le_to_cpu_32(resp->entries);
3771         *length = rte_le_to_cpu_32(resp->entry_length);
3772
3773         HWRM_UNLOCK();
3774         return rc;
3775 }
3776
3777 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3778 {
3779         int rc;
3780         uint32_t dir_entries;
3781         uint32_t entry_length;
3782         uint8_t *buf;
3783         size_t buflen;
3784         rte_iova_t dma_handle;
3785         struct hwrm_nvm_get_dir_entries_input req = {0};
3786         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3787
3788         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3789         if (rc != 0)
3790                 return rc;
3791
3792         *data++ = dir_entries;
3793         *data++ = entry_length;
3794         len -= 2;
3795         memset(data, 0xff, len);
3796
3797         buflen = dir_entries * entry_length;
3798         buf = rte_malloc("nvm_dir", buflen, 0);
3799         if (buf == NULL)
3800                 return -ENOMEM;
3801         rte_mem_lock_page(buf);
3802         dma_handle = rte_mem_virt2iova(buf);
3803         if (dma_handle == RTE_BAD_IOVA) {
3804                 PMD_DRV_LOG(ERR,
3805                         "unable to map response address to physical memory\n");
3806                 return -ENOMEM;
3807         }
3808         HWRM_PREP(req, NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
3809         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3810         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3811
3812         if (rc == 0)
3813                 memcpy(data, buf, len > buflen ? buflen : len);
3814
3815         rte_free(buf);
3816         HWRM_CHECK_RESULT();
3817         HWRM_UNLOCK();
3818
3819         return rc;
3820 }
3821
3822 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3823                              uint32_t offset, uint32_t length,
3824                              uint8_t *data)
3825 {
3826         int rc;
3827         uint8_t *buf;
3828         rte_iova_t dma_handle;
3829         struct hwrm_nvm_read_input req = {0};
3830         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3831
3832         buf = rte_malloc("nvm_item", length, 0);
3833         if (!buf)
3834                 return -ENOMEM;
3835         rte_mem_lock_page(buf);
3836
3837         dma_handle = rte_mem_virt2iova(buf);
3838         if (dma_handle == RTE_BAD_IOVA) {
3839                 PMD_DRV_LOG(ERR,
3840                         "unable to map response address to physical memory\n");
3841                 return -ENOMEM;
3842         }
3843         HWRM_PREP(req, NVM_READ, BNXT_USE_CHIMP_MB);
3844         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3845         req.dir_idx = rte_cpu_to_le_16(index);
3846         req.offset = rte_cpu_to_le_32(offset);
3847         req.len = rte_cpu_to_le_32(length);
3848         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3849         if (rc == 0)
3850                 memcpy(data, buf, length);
3851
3852         rte_free(buf);
3853         HWRM_CHECK_RESULT();
3854         HWRM_UNLOCK();
3855
3856         return rc;
3857 }
3858
3859 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3860 {
3861         int rc;
3862         struct hwrm_nvm_erase_dir_entry_input req = {0};
3863         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3864
3865         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
3866         req.dir_idx = rte_cpu_to_le_16(index);
3867         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3868         HWRM_CHECK_RESULT();
3869         HWRM_UNLOCK();
3870
3871         return rc;
3872 }
3873
3874
3875 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3876                           uint16_t dir_ordinal, uint16_t dir_ext,
3877                           uint16_t dir_attr, const uint8_t *data,
3878                           size_t data_len)
3879 {
3880         int rc;
3881         struct hwrm_nvm_write_input req = {0};
3882         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3883         rte_iova_t dma_handle;
3884         uint8_t *buf;
3885
3886         buf = rte_malloc("nvm_write", data_len, 0);
3887         if (!buf)
3888                 return -ENOMEM;
3889         rte_mem_lock_page(buf);
3890
3891         dma_handle = rte_mem_virt2iova(buf);
3892         if (dma_handle == RTE_BAD_IOVA) {
3893                 PMD_DRV_LOG(ERR,
3894                         "unable to map response address to physical memory\n");
3895                 return -ENOMEM;
3896         }
3897         memcpy(buf, data, data_len);
3898
3899         HWRM_PREP(req, NVM_WRITE, BNXT_USE_CHIMP_MB);
3900
3901         req.dir_type = rte_cpu_to_le_16(dir_type);
3902         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3903         req.dir_ext = rte_cpu_to_le_16(dir_ext);
3904         req.dir_attr = rte_cpu_to_le_16(dir_attr);
3905         req.dir_data_length = rte_cpu_to_le_32(data_len);
3906         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3907
3908         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3909
3910         rte_free(buf);
3911         HWRM_CHECK_RESULT();
3912         HWRM_UNLOCK();
3913
3914         return rc;
3915 }
3916
3917 static void
3918 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3919 {
3920         uint32_t *count = cbdata;
3921
3922         *count = *count + 1;
3923 }
3924
3925 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3926                                      struct bnxt_vnic_info *vnic __rte_unused)
3927 {
3928         return 0;
3929 }
3930
3931 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3932 {
3933         uint32_t count = 0;
3934
3935         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3936             &count, bnxt_vnic_count_hwrm_stub);
3937
3938         return count;
3939 }
3940
3941 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3942                                         uint16_t *vnic_ids)
3943 {
3944         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3945         struct hwrm_func_vf_vnic_ids_query_output *resp =
3946                                                 bp->hwrm_cmd_resp_addr;
3947         int rc;
3948
3949         /* First query all VNIC ids */
3950         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
3951
3952         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3953         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3954         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3955
3956         if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
3957                 HWRM_UNLOCK();
3958                 PMD_DRV_LOG(ERR,
3959                 "unable to map VNIC ID table address to physical memory\n");
3960                 return -ENOMEM;
3961         }
3962         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3963         HWRM_CHECK_RESULT();
3964         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3965
3966         HWRM_UNLOCK();
3967
3968         return rc;
3969 }
3970
3971 /*
3972  * This function queries the VNIC IDs for a specified VF. It then calls
3973  * the vnic_cb to update the necessary field in vnic_info with cbdata.
3974  * Then it calls the hwrm_cb function to program this new vnic configuration.
3975  */
3976 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3977         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3978         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3979 {
3980         struct bnxt_vnic_info vnic;
3981         int rc = 0;
3982         int i, num_vnic_ids;
3983         uint16_t *vnic_ids;
3984         size_t vnic_id_sz;
3985         size_t sz;
3986
3987         /* First query all VNIC ids */
3988         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3989         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3990                         RTE_CACHE_LINE_SIZE);
3991         if (vnic_ids == NULL)
3992                 return -ENOMEM;
3993
3994         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3995                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3996
3997         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3998
3999         if (num_vnic_ids < 0)
4000                 return num_vnic_ids;
4001
4002         /* Retrieve each VNIC, let vnic_cb update it, then program it via hwrm_cb */
4003
4004         for (i = 0; i < num_vnic_ids; i++) {
4005                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4006                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4007                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
4008                 if (rc)
4009                         break;
4010                 if (vnic.mru <= 4)      /* Indicates unallocated */
4011                         continue;
4012
4013                 vnic_cb(&vnic, cbdata);
4014
4015                 rc = hwrm_cb(bp, &vnic);
4016                 if (rc)
4017                         break;
4018         }
4019
4020         rte_free(vnic_ids);
4021
4022         return rc;
4023 }
4024
4025 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
4026                                               bool on)
4027 {
4028         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4029         struct hwrm_func_cfg_input req = {0};
4030         int rc;
4031
4032         HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
4033
4034         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
4035         req.enables |= rte_cpu_to_le_32(
4036                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
4037         req.vlan_antispoof_mode = on ?
4038                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
4039                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
4040         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4041
4042         HWRM_CHECK_RESULT();
4043         HWRM_UNLOCK();
4044
4045         return rc;
4046 }
4047
4048 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
4049 {
4050         struct bnxt_vnic_info vnic;
4051         uint16_t *vnic_ids;
4052         size_t vnic_id_sz;
4053         int num_vnic_ids, i;
4054         size_t sz;
4055         int rc;
4056
4057         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
4058         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4059                         RTE_CACHE_LINE_SIZE);
4060         if (vnic_ids == NULL)
4061                 return -ENOMEM;
4062
4063         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4064                 rte_mem_lock_page(((char *)vnic_ids) + sz);
4065
4066         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4067         if (rc <= 0)
4068                 goto exit;
4069         num_vnic_ids = rc;
4070
4071         /*
4072          * Loop through to find the default VNIC ID.
4073          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
4074          * by sending the hwrm_func_qcfg command to the firmware.
4075          */
4076         for (i = 0; i < num_vnic_ids; i++) {
4077                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4078                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4079                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
4080                                         bp->pf.first_vf_id + vf);
4081                 if (rc)
4082                         goto exit;
4083                 if (vnic.func_default) {
4084                         rte_free(vnic_ids);
4085                         return vnic.fw_vnic_id;
4086                 }
4087         }
4088         /* Could not find a default VNIC. */
4089         PMD_DRV_LOG(ERR, "No default VNIC\n");
4090 exit:
4091         rte_free(vnic_ids);
4092         return rc;
4093 }
4094
4095 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
4096                          uint16_t dst_id,
4097                          struct bnxt_filter_info *filter)
4098 {
4099         int rc = 0;
4100         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
4101         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4102         uint32_t enables = 0;
4103
4104         if (filter->fw_em_filter_id != UINT64_MAX)
4105                 bnxt_hwrm_clear_em_filter(bp, filter);
4106
4107         HWRM_PREP(req, CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
4108
4109         req.flags = rte_cpu_to_le_32(filter->flags);
4110
4111         enables = filter->enables |
4112               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
4113         req.dst_id = rte_cpu_to_le_16(dst_id);
4114
4115         if (filter->ip_addr_type) {
4116                 req.ip_addr_type = filter->ip_addr_type;
4117                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4118         }
4119         if (enables &
4120             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4121                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4122         if (enables &
4123             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4124                 memcpy(req.src_macaddr, filter->src_macaddr,
4125                        RTE_ETHER_ADDR_LEN);
4126         if (enables &
4127             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
4128                 memcpy(req.dst_macaddr, filter->dst_macaddr,
4129                        RTE_ETHER_ADDR_LEN);
4130         if (enables &
4131             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
4132                 req.ovlan_vid = filter->l2_ovlan;
4133         if (enables &
4134             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
4135                 req.ivlan_vid = filter->l2_ivlan;
4136         if (enables &
4137             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
4138                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4139         if (enables &
4140             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4141                 req.ip_protocol = filter->ip_protocol;
4142         if (enables &
4143             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4144                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
4145         if (enables &
4146             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
4147                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
4148         if (enables &
4149             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
4150                 req.src_port = rte_cpu_to_be_16(filter->src_port);
4151         if (enables &
4152             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
4153                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
4154         if (enables &
4155             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4156                 req.mirror_vnic_id = filter->mirror_vnic_id;
4157
4158         req.enables = rte_cpu_to_le_32(enables);
4159
4160         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4161
4162         HWRM_CHECK_RESULT();
4163
4164         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
4165         HWRM_UNLOCK();
4166
4167         return rc;
4168 }
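
/*
 * Illustrative sketch (not part of the driver): programming an RX
 * exact-match filter for an IPv4/TCP flow with the helper above. Field
 * names come from struct bnxt_filter_info; the values are hypothetical
 * and given in host byte order, since the helper performs the byte
 * swapping the EM flow key requires. Error handling is abbreviated.
 */
#if 0
	filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;
	filter->enables = HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR |
			  HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL |
			  HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT;
	filter->src_ipaddr[0] = RTE_IPV4(192, 168, 1, 1);
	filter->ip_protocol = 6;	/* TCP */
	filter->dst_port = 4000;
	rc = bnxt_hwrm_set_em_filter(bp, vnic->fw_vnic_id, filter);
#endif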
4169
4170 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
4171 {
4172         int rc = 0;
4173         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
4174         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
4175
4176         if (filter->fw_em_filter_id == UINT64_MAX)
4177                 return 0;
4178
	PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
4180         HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
4181
4182         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
4183
4184         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4185
4186         HWRM_CHECK_RESULT();
4187         HWRM_UNLOCK();
4188
4189         filter->fw_em_filter_id = UINT64_MAX;
4190         filter->fw_l2_filter_id = UINT64_MAX;
4191
4192         return 0;
4193 }
4194
4195 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
4196                          uint16_t dst_id,
4197                          struct bnxt_filter_info *filter)
4198 {
4199         int rc = 0;
4200         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
4201         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
4202                                                 bp->hwrm_cmd_resp_addr;
4203         uint32_t enables = 0;
4204
4205         if (filter->fw_ntuple_filter_id != UINT64_MAX)
4206                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
4207
4208         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
4209
4210         req.flags = rte_cpu_to_le_32(filter->flags);
4211
4212         enables = filter->enables |
4213               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
4214         req.dst_id = rte_cpu_to_le_16(dst_id);
4215
4217         if (filter->ip_addr_type) {
4218                 req.ip_addr_type = filter->ip_addr_type;
4219                 enables |=
4220                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4221         }
4222         if (enables &
4223             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4224                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4225         if (enables &
4226             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4227                 memcpy(req.src_macaddr, filter->src_macaddr,
4228                        RTE_ETHER_ADDR_LEN);
4233         if (enables &
4234             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
4235                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4236         if (enables &
4237             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4238                 req.ip_protocol = filter->ip_protocol;
4239         if (enables &
4240             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4241                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
4242         if (enables &
4243             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
4244                 req.src_ipaddr_mask[0] =
4245                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
4246         if (enables &
4247             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
4248                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
4249         if (enables &
4250             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
4251                 req.dst_ipaddr_mask[0] =
			rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
4253         if (enables &
4254             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
4255                 req.src_port = rte_cpu_to_le_16(filter->src_port);
4256         if (enables &
4257             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
4258                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
4259         if (enables &
4260             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
4261                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
4262         if (enables &
4263             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
4264                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
4265         if (enables &
4266             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4267                 req.mirror_vnic_id = filter->mirror_vnic_id;
4268
4269         req.enables = rte_cpu_to_le_32(enables);
4270
4271         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4272
4273         HWRM_CHECK_RESULT();
4274
4275         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
4276         HWRM_UNLOCK();
4277
4278         return rc;
4279 }
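
/*
 * Illustrative sketch (not part of the driver): unlike EM filters, ntuple
 * filters carry per-field masks, so a caller can match a whole subnet. An
 * all-ones mask requests an exact match on that field. RTE_IPV4() is the
 * rte_ip.h address constructor; the values are hypothetical.
 */
#if 0
	filter->enables =
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK;
	filter->src_ipaddr[0] = RTE_IPV4(10, 0, 0, 0);
	filter->src_ipaddr_mask[0] = RTE_IPV4(255, 255, 255, 0);	/* /24 */
	rc = bnxt_hwrm_set_ntuple_filter(bp, vnic->fw_vnic_id, filter);
#endif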
4280
4281 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
4282                                 struct bnxt_filter_info *filter)
4283 {
4284         int rc = 0;
4285         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
4286         struct hwrm_cfa_ntuple_filter_free_output *resp =
4287                                                 bp->hwrm_cmd_resp_addr;
4288
4289         if (filter->fw_ntuple_filter_id == UINT64_MAX)
4290                 return 0;
4291
4292         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
4293
4294         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
4295
4296         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4297
4298         HWRM_CHECK_RESULT();
4299         HWRM_UNLOCK();
4300
4301         filter->fw_ntuple_filter_id = UINT64_MAX;
4302
4303         return 0;
4304 }
4305
4306 static int
4307 bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4308 {
4309         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4310         uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
4311         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
4312         struct bnxt_rx_queue **rxqs = bp->rx_queues;
4313         uint16_t *ring_tbl = vnic->rss_table;
4314         int nr_ctxs = vnic->num_lb_ctxts;
4315         int max_rings = bp->rx_nr_rings;
4316         int i, j, k, cnt;
4317         int rc = 0;
4318
4319         for (i = 0, k = 0; i < nr_ctxs; i++) {
4320                 struct bnxt_rx_ring_info *rxr;
4321                 struct bnxt_cp_ring_info *cpr;
4322
4323                 HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
4324
4325                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
4326                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
4327                 req.hash_mode_flags = vnic->hash_mode;
4328
4329                 req.ring_grp_tbl_addr =
4330                     rte_cpu_to_le_64(vnic->rss_table_dma_addr +
4331                                      i * BNXT_RSS_ENTRIES_PER_CTX_THOR *
4332                                      2 * sizeof(*ring_tbl));
4333                 req.hash_key_tbl_addr =
4334                     rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
4335
4336                 req.ring_table_pair_index = i;
4337                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
4338
		for (j = 0; j < BNXT_RSS_ENTRIES_PER_CTX_THOR; j++) {
4340                         uint16_t ring_id;
4341
4342                         /* Find next active ring. */
4343                         for (cnt = 0; cnt < max_rings; cnt++) {
4344                                 if (rx_queue_state[k] !=
4345                                                 RTE_ETH_QUEUE_STATE_STOPPED)
4346                                         break;
4347                                 if (++k == max_rings)
4348                                         k = 0;
4349                         }
4350
			/* Return if no rings are active; drop the HWRM
			 * lock taken by HWRM_PREP before returning.
			 */
			if (cnt == max_rings) {
				HWRM_UNLOCK();
				return 0;
			}
4354
4355                         /* Add rx/cp ring pair to RSS table. */
4356                         rxr = rxqs[k]->rx_ring;
4357                         cpr = rxqs[k]->cp_ring;
4358
4359                         ring_id = rxr->rx_ring_struct->fw_ring_id;
4360                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4361                         ring_id = cpr->cp_ring_struct->fw_ring_id;
4362                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4363
4364                         if (++k == max_rings)
4365                                 k = 0;
4366                 }
4367                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
4368                                             BNXT_USE_CHIMP_MB);
4369
4370                 HWRM_CHECK_RESULT();
4371                 HWRM_UNLOCK();
4372         }
4373
4374         return rc;
4375 }
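
/*
 * On Thor the RSS indirection table holds (rx ring id, completion ring id)
 * pairs rather than ring group ids, which is why the loop above writes two
 * entries per slot. With two active rings the table begins:
 *
 *	ring_tbl[0] = rx_ring0_fw_id;  ring_tbl[1] = cp_ring0_fw_id;
 *	ring_tbl[2] = rx_ring1_fw_id;  ring_tbl[3] = cp_ring1_fw_id;
 *
 * and repeats round-robin over the active rings for all
 * BNXT_RSS_ENTRIES_PER_CTX_THOR slots of each context.
 */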
4376
4377 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4378 {
4379         unsigned int rss_idx, fw_idx, i;
4380
4381         if (!(vnic->rss_table && vnic->hash_type))
4382                 return 0;
4383
4384         if (BNXT_CHIP_THOR(bp))
4385                 return bnxt_vnic_rss_configure_thor(bp, vnic);
4386
4387         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4388                 return 0;
4389
	/*
	 * Fill the RSS hash & redirection table with
	 * ring group ids for all VNICs.
	 */
	for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
	     rss_idx++, fw_idx++) {
		for (i = 0; i < bp->rx_cp_nr_rings; i++) {
			fw_idx %= bp->rx_cp_nr_rings;
			if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
				break;
			fw_idx++;
		}
		if (i == bp->rx_cp_nr_rings)
			return 0;
		vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
	}

	return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
4412 }
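
/*
 * Worked example for the table fill above: with four completion rings of
 * which ring 2 has an invalid group id, fw_idx visits 0, 1, 3, 0, 1, 3, ...
 * so the HW_HASH_INDEX_SIZE redirection slots are shared round-robin by the
 * valid ring groups only and no hash bucket points at a dead ring.
 */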
4413
4414 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
4415         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
4416 {
4417         uint16_t flags;
4418
4419         req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
4420
	/* This is a 6-bit value and must not be 0, or the IRQ fires non-stop. */
	req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);

	/* This is a 6-bit value and must not be 0, or the IRQ fires non-stop. */
	req->num_cmpl_dma_aggr_during_int =
		rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
4427
4428         req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
4429
4430         /* min timer set to 1/2 of interrupt timer */
4431         req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
4432
4433         /* buf timer set to 1/4 of interrupt timer */
4434         req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
4435
4436         req->cmpl_aggr_dma_tmr_during_int =
4437                 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
4438
4439         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
4440                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
4441         req->flags = rte_cpu_to_le_16(flags);
4442 }
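
/*
 * Hypothetical example values (not driver defaults) satisfying the
 * relationships described in the comments above:
 *
 *	hw_coal->int_lat_tmr_max = 80;
 *	hw_coal->int_lat_tmr_min = 40;		half the interrupt timer
 *	hw_coal->cmpl_aggr_dma_tmr = 20;	quarter of the interrupt timer
 *	hw_coal->num_cmpl_dma_aggr = 4;		6-bit field, must be non-zero
 */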
4443
4444 static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp,
4445                 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
4446 {
4447         struct hwrm_ring_aggint_qcaps_input req = {0};
4448         struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4449         uint32_t enables;
4450         uint16_t flags;
4451         int rc;
4452
4453         HWRM_PREP(req, RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
4454         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4455         HWRM_CHECK_RESULT();
4456
4457         agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max;
4458         agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min;
4459
4460         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
4461                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
4462         agg_req->flags = rte_cpu_to_le_16(flags);
4463         enables =
4464          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR |
4465          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR;
4466         agg_req->enables = rte_cpu_to_le_32(enables);
4467
4468         HWRM_UNLOCK();
4469         return rc;
4470 }
4471
4472 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
4473                         struct bnxt_coal *coal, uint16_t ring_id)
4474 {
4475         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
4476         struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
4477                                                 bp->hwrm_cmd_resp_addr;
4478         int rc;
4479
	/* Set ring coalesce parameters only for Thor and Stratus (100G) NICs. */
	if (BNXT_CHIP_THOR(bp)) {
		rc = bnxt_hwrm_set_coal_params_thor(bp, &req);
		if (rc)
			return rc;
	} else if (bnxt_stratus_device(bp)) {
		bnxt_hwrm_set_coal_params(coal, &req);
	} else {
		return 0;
	}
4489
4490         HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS, BNXT_USE_CHIMP_MB);
4491         req.ring_id = rte_cpu_to_le_16(ring_id);
4492         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4493         HWRM_CHECK_RESULT();
4494         HWRM_UNLOCK();
4495         return 0;
4496 }
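
/*
 * Usage sketch (illustrative): the driver invokes this once per completion
 * ring, typically from the queue start path. bnxt_init_coal() is a
 * hypothetical helper that fills struct bnxt_coal with sane defaults.
 */
#if 0
	struct bnxt_coal coal;

	bnxt_init_coal(&coal);
	bnxt_hwrm_set_ring_coal(bp, &coal, cpr->cp_ring_struct->fw_ring_id);
#endif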
4497
4498 #define BNXT_RTE_MEMZONE_FLAG  (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
4499 int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
4500 {
4501         struct hwrm_func_backing_store_qcaps_input req = {0};
4502         struct hwrm_func_backing_store_qcaps_output *resp =
4503                 bp->hwrm_cmd_resp_addr;
4504         struct bnxt_ctx_pg_info *ctx_pg;
4505         struct bnxt_ctx_mem_info *ctx;
4506         int total_alloc_len;
4507         int rc, i;
4508
4509         if (!BNXT_CHIP_THOR(bp) ||
4510             bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
4511             BNXT_VF(bp) ||
4512             bp->ctx)
4513                 return 0;
4514
4515         HWRM_PREP(req, FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
4516         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4517         HWRM_CHECK_RESULT_SILENT();
4518
4519         total_alloc_len = sizeof(*ctx);
4520         ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len,
4521                           RTE_CACHE_LINE_SIZE);
4522         if (!ctx) {
4523                 rc = -ENOMEM;
4524                 goto ctx_err;
4525         }
4526
4527         ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
4528                             sizeof(*ctx_pg) * BNXT_MAX_Q,
4529                             RTE_CACHE_LINE_SIZE);
	if (!ctx_pg) {
		rte_free(ctx);
		rc = -ENOMEM;
		goto ctx_err;
	}
4534         for (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++)
4535                 ctx->tqm_mem[i] = ctx_pg;
4536
4537         bp->ctx = ctx;
4538         ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
4539         ctx->qp_min_qp1_entries =
4540                 rte_le_to_cpu_16(resp->qp_min_qp1_entries);
4541         ctx->qp_max_l2_entries =
4542                 rte_le_to_cpu_16(resp->qp_max_l2_entries);
4543         ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
4544         ctx->srq_max_l2_entries =
4545                 rte_le_to_cpu_16(resp->srq_max_l2_entries);
4546         ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
4547         ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
4548         ctx->cq_max_l2_entries =
4549                 rte_le_to_cpu_16(resp->cq_max_l2_entries);
4550         ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
4551         ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
4552         ctx->vnic_max_vnic_entries =
4553                 rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
4554         ctx->vnic_max_ring_table_entries =
4555                 rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
4556         ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
4557         ctx->stat_max_entries =
4558                 rte_le_to_cpu_32(resp->stat_max_entries);
4559         ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
4560         ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
4561         ctx->tqm_min_entries_per_ring =
4562                 rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
4563         ctx->tqm_max_entries_per_ring =
4564                 rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
4565         ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
4566         if (!ctx->tqm_entries_multiple)
4567                 ctx->tqm_entries_multiple = 1;
4568         ctx->mrav_max_entries =
4569                 rte_le_to_cpu_32(resp->mrav_max_entries);
4570         ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
4571         ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
4572         ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
4573 ctx_err:
4574         HWRM_UNLOCK();
4575         return rc;
4576 }
4577
4578 int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
4579 {
4580         struct hwrm_func_backing_store_cfg_input req = {0};
4581         struct hwrm_func_backing_store_cfg_output *resp =
4582                 bp->hwrm_cmd_resp_addr;
4583         struct bnxt_ctx_mem_info *ctx = bp->ctx;
4584         struct bnxt_ctx_pg_info *ctx_pg;
4585         uint32_t *num_entries;
4586         uint64_t *pg_dir;
4587         uint8_t *pg_attr;
4588         uint32_t ena;
4589         int i, rc;
4590
4591         if (!ctx)
4592                 return 0;
4593
4594         HWRM_PREP(req, FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
4595         req.enables = rte_cpu_to_le_32(enables);
4596
4597         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
4598                 ctx_pg = &ctx->qp_mem;
4599                 req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4600                 req.qp_num_qp1_entries =
4601                         rte_cpu_to_le_16(ctx->qp_min_qp1_entries);
4602                 req.qp_num_l2_entries =
4603                         rte_cpu_to_le_16(ctx->qp_max_l2_entries);
4604                 req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);
4605                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4606                                       &req.qpc_pg_size_qpc_lvl,
4607                                       &req.qpc_page_dir);
4608         }
4609
4610         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
4611                 ctx_pg = &ctx->srq_mem;
4612                 req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4613                 req.srq_num_l2_entries =
4614                                  rte_cpu_to_le_16(ctx->srq_max_l2_entries);
4615                 req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);
4616                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4617                                       &req.srq_pg_size_srq_lvl,
4618                                       &req.srq_page_dir);
4619         }
4620
4621         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
4622                 ctx_pg = &ctx->cq_mem;
4623                 req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4624                 req.cq_num_l2_entries =
4625                                 rte_cpu_to_le_16(ctx->cq_max_l2_entries);
4626                 req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);
4627                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4628                                       &req.cq_pg_size_cq_lvl,
4629                                       &req.cq_page_dir);
4630         }
4631
4632         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
4633                 ctx_pg = &ctx->vnic_mem;
4634                 req.vnic_num_vnic_entries =
4635                         rte_cpu_to_le_16(ctx->vnic_max_vnic_entries);
4636                 req.vnic_num_ring_table_entries =
4637                         rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);
4638                 req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);
4639                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4640                                       &req.vnic_pg_size_vnic_lvl,
4641                                       &req.vnic_page_dir);
4642         }
4643
4644         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
4645                 ctx_pg = &ctx->stat_mem;
		req.stat_num_entries =
			rte_cpu_to_le_32(ctx->stat_max_entries);
4647                 req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);
4648                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4649                                       &req.stat_pg_size_stat_lvl,
4650                                       &req.stat_page_dir);
4651         }
4652
4653         req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
4654         num_entries = &req.tqm_sp_num_entries;
4655         pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;
4656         pg_dir = &req.tqm_sp_page_dir;
4657         ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
4658         for (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
4659                 if (!(enables & ena))
4660                         continue;
4661
4664                 ctx_pg = ctx->tqm_mem[i];
		*num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4666                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
4667         }
4668
4669         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4670         HWRM_CHECK_RESULT();
4671         HWRM_UNLOCK();
4672
4673         return rc;
4674 }
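
/*
 * Illustrative call sequence (abbreviated): the caller first allocates the
 * backing store regions sized from bnxt_hwrm_func_backing_store_qcaps(),
 * then passes only the enables bits for regions that were actually set up.
 * The TQM enables bits are consecutive, which the "ena <<= 1" loop above
 * relies on.
 */
#if 0
	enables = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP |
		  HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ |
		  HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ;
	rc = bnxt_hwrm_func_backing_store_cfg(bp, enables);
#endif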
4675
4676 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
4677 {
4678         struct hwrm_port_qstats_ext_input req = {0};
4679         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
4680         struct bnxt_pf_info *pf = &bp->pf;
4681         int rc;
4682
4683         if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
4684               bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
4685                 return 0;
4686
4687         HWRM_PREP(req, PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
4688
4689         req.port_id = rte_cpu_to_le_16(pf->port_id);
4690         if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
4691                 req.tx_stat_host_addr =
4692                         rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map);
4693                 req.tx_stat_size =
4694                         rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
4695         }
4696         if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
4697                 req.rx_stat_host_addr =
4698                         rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map);
4699                 req.rx_stat_size =
4700                         rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
4701         }
4702         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4703
	if (rc) {
		bp->fw_rx_port_stats_ext_size = 0;
		bp->fw_tx_port_stats_ext_size = 0;
	}

	/* Validate the response before consuming the returned sizes. */
	HWRM_CHECK_RESULT();

	bp->fw_rx_port_stats_ext_size =
		rte_le_to_cpu_16(resp->rx_stat_size);
	bp->fw_tx_port_stats_ext_size =
		rte_le_to_cpu_16(resp->tx_stat_size);
4715         HWRM_UNLOCK();
4716
4717         return rc;
4718 }
4719
4720 int
4721 bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
4722 {
4723         struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0};
4724         struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp =
4725                 bp->hwrm_cmd_resp_addr;
4726         int rc = 0;
4727
4728         HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB);
4729         req.tunnel_type = type;
4730         req.dest_fid = bp->fw_fid;
4731         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4732         HWRM_CHECK_RESULT();
4733
4734         HWRM_UNLOCK();
4735
4736         return rc;
4737 }
4738
4739 int
4740 bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
4741 {
4742         struct hwrm_cfa_redirect_tunnel_type_free_input req = {0};
4743         struct hwrm_cfa_redirect_tunnel_type_free_output *resp =
4744                 bp->hwrm_cmd_resp_addr;
4745         int rc = 0;
4746
4747         HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB);
4748         req.tunnel_type = type;
4749         req.dest_fid = bp->fw_fid;
4750         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4751         HWRM_CHECK_RESULT();
4752
4753         HWRM_UNLOCK();
4754
4755         return rc;
4756 }
4757
4758 int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
4759 {
4760         struct hwrm_cfa_redirect_query_tunnel_type_input req = {0};
4761         struct hwrm_cfa_redirect_query_tunnel_type_output *resp =
4762                 bp->hwrm_cmd_resp_addr;
4763         int rc = 0;
4764
4765         HWRM_PREP(req, CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB);
4766         req.src_fid = bp->fw_fid;
4767         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4768         HWRM_CHECK_RESULT();
4769
4770         if (type)
4771                 *type = rte_le_to_cpu_32(resp->tunnel_mask);
4772
4773         HWRM_UNLOCK();
4774
4775         return rc;
4776 }
4777
4778 int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
4779                                    uint16_t *dst_fid)
4780 {
4781         struct hwrm_cfa_redirect_tunnel_type_info_input req = {0};
4782         struct hwrm_cfa_redirect_tunnel_type_info_output *resp =
4783                 bp->hwrm_cmd_resp_addr;
4784         int rc = 0;
4785
4786         HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
4787         req.src_fid = bp->fw_fid;
4788         req.tunnel_type = tun_type;
4789         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4790         HWRM_CHECK_RESULT();
4791
4792         if (dst_fid)
4793                 *dst_fid = rte_le_to_cpu_16(resp->dest_fid);
4794
	PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", rte_le_to_cpu_16(resp->dest_fid));
4796
4797         HWRM_UNLOCK();
4798
4799         return rc;
4800 }
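
/*
 * Usage sketch (illustrative): a function that wants VXLAN traffic
 * redirected to itself allocates the redirect and can later query which
 * FID currently owns the tunnel type. The *_TUNNEL_TYPE_VXLAN constants
 * follow the hsi_struct_def_dpdk.h naming; error handling is omitted.
 */
#if 0
	uint16_t dst_fid;

	bnxt_hwrm_tunnel_redirect(bp,
		HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_VXLAN);
	bnxt_hwrm_tunnel_redirect_info(bp,
		HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_VXLAN,
		&dst_fid);
#endif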
4801
4802 int bnxt_hwrm_set_mac(struct bnxt *bp)
4803 {
4804         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4805         struct hwrm_func_vf_cfg_input req = {0};
4806         int rc = 0;
4807
4808         if (!BNXT_VF(bp))
4809                 return 0;
4810
4811         HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
4812
4813         req.enables =
4814                 rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
4815         memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
4816
4817         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4818
4819         HWRM_CHECK_RESULT();
4820
4821         memcpy(bp->dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
4822         HWRM_UNLOCK();
4823
4824         return rc;
4825 }
4826
4827 int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
4828 {
4829         struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
4830         struct hwrm_func_drv_if_change_input req = {0};
4831         uint32_t flags;
4832         int rc;
4833
4834         if (!(bp->flags & BNXT_FLAG_FW_CAP_IF_CHANGE))
4835                 return 0;
4836
	/* Do not issue FUNC_DRV_IF_CHANGE during reset recovery.
	 * If FUNC_DRV_IF_CHANGE is sent with the UP flag cleared before
	 * FUNC_DRV_UNRGTR, the FW resets the function before the
	 * FUNC_DRV_UNRGTR request is processed.
	 */
4841         if (!up && (bp->flags & BNXT_FLAG_FW_RESET))
4842                 return 0;
4843
4844         HWRM_PREP(req, FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);
4845
4846         if (up)
4847                 req.flags =
4848                 rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP);
4849
4850         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4851
4852         HWRM_CHECK_RESULT();
4853         flags = rte_le_to_cpu_32(resp->flags);
4854         HWRM_UNLOCK();
4855
4856         if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
4857                 PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
4858                 bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
4859         }
4860
4861         return 0;
4862 }
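
/*
 * Usage sketch (illustrative): dev_start issues the "up" notification
 * before programming the port and dev_stop issues the "down" one after
 * quiescing it. When the hot FW reset flag comes back set, the caller is
 * expected to re-create resources lost across the reset.
 */
#if 0
	rc = bnxt_hwrm_if_change(bp, true);
	if (!rc && (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE)) {
		/* Re-create rings, filters, etc. before resuming traffic. */
	}
#endif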
4863
4864 int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
4865 {
4866         struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4867         struct bnxt_error_recovery_info *info = bp->recovery_info;
4868         struct hwrm_error_recovery_qcfg_input req = {0};
4869         uint32_t flags = 0;
4870         unsigned int i;
4871         int rc;
4872
4873         /* Older FW does not have error recovery support */
4874         if (!(bp->flags & BNXT_FLAG_FW_CAP_ERROR_RECOVERY))
4875                 return 0;
4876
4877         if (!info) {
4878                 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg",
4879                                    sizeof(*info), 0);
		if (info == NULL)
			return -ENOMEM;
		bp->recovery_info = info;
4883         } else {
4884                 memset(info, 0, sizeof(*info));
4885         }
4886
4887         HWRM_PREP(req, ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
4888
4889         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4890
4891         HWRM_CHECK_RESULT();
4892
4893         flags = rte_le_to_cpu_32(resp->flags);
4894         if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST)
4895                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST;
4896         else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
4897                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU;
4898
4899         if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) &&
4900             !(bp->flags & BNXT_FLAG_KONG_MB_EN)) {
4901                 rc = -EINVAL;
4902                 goto err;
4903         }
4904
4905         /* FW returned values are in units of 100msec */
4906         info->driver_polling_freq =
4907                 rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
4908         info->master_func_wait_period =
4909                 rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
4910         info->normal_func_wait_period =
4911                 rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
4912         info->master_func_wait_period_after_reset =
4913                 rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100;
4914         info->max_bailout_time_after_reset =
4915                 rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
4916         info->status_regs[BNXT_FW_STATUS_REG] =
4917                 rte_le_to_cpu_32(resp->fw_health_status_reg);
4918         info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] =
4919                 rte_le_to_cpu_32(resp->fw_heartbeat_reg);
4920         info->status_regs[BNXT_FW_RECOVERY_CNT_REG] =
4921                 rte_le_to_cpu_32(resp->fw_reset_cnt_reg);
4922         info->status_regs[BNXT_FW_RESET_INPROG_REG] =
4923                 rte_le_to_cpu_32(resp->reset_inprogress_reg);
4924         info->reg_array_cnt =
4925                 rte_le_to_cpu_32(resp->reg_array_cnt);
4926
4927         if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) {
4928                 rc = -EINVAL;
4929                 goto err;
4930         }
4931
4932         for (i = 0; i < info->reg_array_cnt; i++) {
4933                 info->reset_reg[i] =
4934                         rte_le_to_cpu_32(resp->reset_reg[i]);
4935                 info->reset_reg_val[i] =
4936                         rte_le_to_cpu_32(resp->reset_reg_val[i]);
4937                 info->delay_after_reset[i] =
4938                         resp->delay_after_reset[i];
4939         }
4940 err:
4941         HWRM_UNLOCK();
4942
4943         /* Map the FW status registers */
4944         if (!rc)
4945                 rc = bnxt_map_fw_health_status_regs(bp);
4946
4947         if (rc) {
4948                 rte_free(bp->recovery_info);
4949                 bp->recovery_info = NULL;
4950         }
4951         return rc;
4952 }
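
/*
 * Sketch of how the parsed recovery info is consumed (illustrative): a
 * periodic alarm polls the mapped health registers every
 * info->driver_polling_freq msec and compares the heartbeat counter with
 * the previous sample. bnxt_read_fw_status_reg() is assumed here as the
 * accessor for the registers mapped by bnxt_map_fw_health_status_regs();
 * bnxt_start_recovery() is a hypothetical trigger.
 */
#if 0
	uint32_t hb = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG);

	if (hb == last_heartbeat)	/* heartbeat stalled */
		bnxt_start_recovery(bp);
	last_heartbeat = hb;
#endif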
4953
4954 int bnxt_hwrm_fw_reset(struct bnxt *bp)
4955 {
4956         struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr;
4957         struct hwrm_fw_reset_input req = {0};
4958         int rc;
4959
4960         if (!BNXT_PF(bp))
4961                 return -EOPNOTSUPP;
4962
4963         HWRM_PREP(req, FW_RESET, BNXT_USE_KONG(bp));
4964
4965         req.embedded_proc_type =
4966                 HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
4967         req.selfrst_status =
4968                 HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
4969         req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
4970
4971         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
4972                                     BNXT_USE_KONG(bp));
4973
4974         HWRM_CHECK_RESULT();
4975         HWRM_UNLOCK();
4976
4977         return rc;
4978 }
4979
4980 int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
4981 {
4982         struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
4983         struct hwrm_port_ts_query_input req = {0};
4984         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
4985         uint32_t flags = 0;
4986         int rc;
4987
4988         if (!ptp)
4989                 return 0;
4990
4991         HWRM_PREP(req, PORT_TS_QUERY, BNXT_USE_CHIMP_MB);
4992
4993         switch (path) {
4994         case BNXT_PTP_FLAGS_PATH_TX:
4995                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX;
4996                 break;
4997         case BNXT_PTP_FLAGS_PATH_RX:
4998                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX;
4999                 break;
5000         case BNXT_PTP_FLAGS_CURRENT_TIME:
5001                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME;
5002                 break;
5003         }
5004
5005         req.flags = rte_cpu_to_le_32(flags);
5006         req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
5007
5008         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5009
5010         HWRM_CHECK_RESULT();
5011
5012         if (timestamp) {
5013                 *timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]);
5014                 *timestamp |=
5015                         (uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32;
5016         }
5017         HWRM_UNLOCK();
5018
5019         return rc;
5020 }
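
/*
 * Usage sketch (illustrative): reading the RX PTP timestamp. The helper
 * combines the two 32-bit response words into a single 64-bit value.
 */
#if 0
	uint64_t ts = 0;

	rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_RX, &ts);
	if (!rc) {
		/* ts holds the 64-bit PHC time of the last PTP RX packet. */
	}
#endif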
5021
5022 int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp)
5023 {
5024         struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp =
5025                                         bp->hwrm_cmd_resp_addr;
5026         struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
5027         uint32_t flags = 0;
5028         int rc = 0;
5029
5030         if (!(bp->flags & BNXT_FLAG_ADV_FLOW_MGMT))
5031                 return rc;
5032
5033         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5034                 PMD_DRV_LOG(DEBUG,
5035                             "Not a PF or trusted VF. Command not supported\n");
5036                 return 0;
5037         }
5038
5039         HWRM_PREP(req, CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_KONG(bp));
5040         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5041
5042         HWRM_CHECK_RESULT();
5043         flags = rte_le_to_cpu_32(resp->flags);
5044         HWRM_UNLOCK();
5045
5046         if (flags & HWRM_CFA_ADV_FLOW_MGNT_QCAPS_L2_HDR_SRC_FILTER_EN) {
5047                 bp->flow_flags |= BNXT_FLOW_FLAG_L2_HDR_SRC_FILTER_EN;
5048                 PMD_DRV_LOG(INFO, "Source L2 header filtering enabled\n");
5049         }
5050
5051         return rc;
5052 }