net/bnxt: support 58818 chip family
drivers/net/bnxt/bnxt_hwrm.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>
#include <rte_io.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define HWRM_SPEC_CODE_1_8_3            0x10803
#define HWRM_VERSION_1_9_1              0x10901
#define HWRM_VERSION_1_9_2              0x10903

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
        return sizeof(int) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}

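/*
 * Example (illustrative): page_getenum(3000) returns 12, so
 * page_roundup(3000) returns 4096 (1 << 12). Sizes above 1 << 30 log an
 * error and fall back to sizeof(int) * 8 - 1, i.e. 31.
 */
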
static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
                                  uint8_t *pg_attr,
                                  uint64_t *pg_dir)
{
        if (rmem->nr_pages > 1) {
                *pg_attr = 1;
                *pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
        } else {
                *pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
        }
}

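/*
 * A single-page ring is referenced directly by its DMA address; a
 * multi-page ring is referenced through a page table (pg_tbl_map), and
 * *pg_attr = 1 tells the FW to expect that one level of indirection.
 */
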
/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return 0 on success, -ETIMEDOUT if
 * the HWRM command times out, or a negative error code if the HWRM
 * command was rejected by the FW.
 */

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                  uint32_t msg_len, bool use_kong_mb)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };
        uint16_t bar_offset = use_kong_mb ?
                GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
        uint16_t mb_trigger_offset = use_kong_mb ?
                GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
        uint32_t timeout;

        /* Do not send HWRM commands to firmware in error state */
        if (bp->flags & BNXT_FLAG_FATAL_ERROR)
                return 0;

        timeout = bp->hwrm_cmd_timeout;

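        /*
         * Short command mode: instead of writing the whole request through
         * the BAR, place it in DMA memory and write only a 16-byte
         * struct hwrm_short_input that points at it.
         */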
        if (bp->flags & BNXT_FLAG_SHORT_CMD ||
            msg_len > bp->max_req_len) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
        rte_write32(1, bar);
        /*
         * Make sure the channel doorbell ring command completes before
         * reading the response to avoid getting stale or invalid
         * responses.
         */
        rte_io_mb();

        /* Poll for the valid bit */
        for (i = 0; i < timeout; i++) {
                /* Sanity check on the resp->resp_len */
                rte_io_rmb();
                if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(1);
        }

        if (i >= timeout) {
                /* Suppress VER_GET timeout messages during reset recovery */
                if (bp->flags & BNXT_FLAG_FW_RESET &&
                    rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
                        return -ETIMEDOUT;

                PMD_DRV_LOG(ERR,
                            "Error(timeout) sending msg 0x%04x, seq_id %d\n",
                            req->req_type, req->seq_id);
                return -ETIMEDOUT;
        }
        return 0;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does initial processing.
 *
 * HWRM_CHECK_RESULT() checks the command result: on failure it releases
 * the spinlock and returns from the enclosing function with a negative
 * errno; on success it leaves the spinlock held. Functions that do not
 * use the regular int return convention should not use HWRM_CHECK_RESULT()
 * directly; copy and adapt it to suit the function instead.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type, kong) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        if (bp->hwrm_cmd_resp_addr == NULL) { \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return -EACCES; \
        } \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        (req)->req_type = rte_cpu_to_le_16(type); \
        (req)->cmpl_ring = rte_cpu_to_le_16(-1); \
        (req)->seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
                rte_cpu_to_le_16(bp->chimp_cmd_seq++); \
        (req)->target_id = rte_cpu_to_le_16(0xffff); \
        (req)->resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT_SILENT() do {\
        if (rc) { \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_CHECK_RESULT() do {\
        if (rc) { \
                PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
                        rc = -ENOSPC; \
                else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
                        rc = -EINVAL; \
                else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
                        rc = -ENOTSUP; \
                else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
                        rc = -EAGAIN; \
                else if (rc > 0) \
                        rc = -EIO; \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        PMD_DRV_LOG(ERR, \
                                "error %d:%d:%08x:%04x\n", \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        PMD_DRV_LOG(ERR, "error %d\n", rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
                        rc = -ENOSPC; \
                else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
                        rc = -EINVAL; \
                else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
                        rc = -ENOTSUP; \
                else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
                        rc = -EAGAIN; \
                else if (rc > 0) \
                        rc = -EIO; \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)

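/*
 * Canonical usage pattern, as used throughout this file (HWRM_FOO is a
 * placeholder command name for illustration):
 *
 *      HWRM_PREP(&req, HWRM_FOO, BNXT_USE_CHIMP_MB);
 *      ...fill req fields...
 *      rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 *      HWRM_CHECK_RESULT();    (on error: unlocks and returns)
 *      ...read resp fields...
 *      HWRM_UNLOCK();
 */
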
int bnxt_hwrm_tf_message_direct(struct bnxt *bp,
                                bool use_kong_mb,
                                uint16_t msg_type,
                                void *msg,
                                uint32_t msg_len,
                                void *resp_msg,
                                uint32_t resp_len)
{
        int rc = 0;
        bool mailbox = BNXT_USE_CHIMP_MB;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;

        if (use_kong_mb)
                mailbox = BNXT_USE_KONG(bp);

        HWRM_PREP(req, msg_type, mailbox);

        rc = bnxt_hwrm_send_message(bp, req, msg_len, mailbox);

        HWRM_CHECK_RESULT();

        if (resp_msg)
                memcpy(resp_msg, resp, resp_len);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_tf_message_tunneled(struct bnxt *bp,
                                  bool use_kong_mb,
                                  uint16_t tf_type,
                                  uint16_t tf_subtype,
                                  uint32_t *tf_response_code,
                                  void *msg,
                                  uint32_t msg_len,
                                  void *response,
                                  uint32_t response_len)
{
        int rc = 0;
        struct hwrm_cfa_tflib_input req = { .req_type = 0 };
        struct hwrm_cfa_tflib_output *resp = bp->hwrm_cmd_resp_addr;
        bool mailbox = BNXT_USE_CHIMP_MB;

        if (msg_len > sizeof(req.tf_req))
                return -ENOMEM;

        if (use_kong_mb)
                mailbox = BNXT_USE_KONG(bp);

        HWRM_PREP(&req, HWRM_TF, mailbox);
        /* Build request using the user supplied request payload.
         * TLV request size is checked at build time against HWRM
         * request max size, thus no checking required.
         */
        req.tf_type = tf_type;
        req.tf_subtype = tf_subtype;
        memcpy(req.tf_req, msg, msg_len);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), mailbox);
        HWRM_CHECK_RESULT();

        /* Copy the response to the user-provided buffer. Copy only the
         * 'payload': the HWRM data structure really is HWRM header +
         * msg header + payload, and TFLIB only provided a payload
         * placeholder.
         */
        if (response != NULL && response_len != 0)
                memcpy(response, resp->tf_resp, response_len);

        /* Extract the internal tflib response code */
        *tf_response_code = resp->tf_resp_code;
        HWRM_UNLOCK();

        return rc;
}

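/*
 * On the wire, a tunneled TF request thus looks like:
 * HWRM header | tf_type/tf_subtype | opaque TF payload (tf_req),
 * and the response mirrors it with tf_resp_code | tf_resp.
 */
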
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;

        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;

        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        } else if (vnic->flags & BNXT_VNIC_INFO_MCAST) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr =
                        rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the
         * set_rx_mask list was used for anti-spoof. In 1.8.0, the TX path
         * configuration was removed from the set_rx_mask call, and this
         * command was added.
         *
         * This command is also present from 1.7.8.11 and higher,
         * as well as in 1.7.8.0.
         */
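        /*
         * bp->fw_ver packs (major << 24) | (minor << 16) | (build << 8) |
         * rsvd (see bnxt_hwrm_ver_get()), so (1 << 24) | (8 << 16) below
         * is FW 1.8.0.0 and the innermost constant is FW 1.7.8.11.
         */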
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(&req, HWRM_CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                             struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct bnxt_filter_info *l2_filter = filter;
        struct bnxt_vnic_info *vnic = NULL;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        if (filter->matching_l2_fltr_ptr)
                l2_filter = filter->matching_l2_fltr_ptr;

        PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
                    filter, l2_filter, l2_filter->l2_ref_cnt);

        if (l2_filter->l2_ref_cnt == 0)
                return 0;

        if (l2_filter->l2_ref_cnt > 0)
                l2_filter->l2_ref_cnt--;

        if (l2_filter->l2_ref_cnt > 0)
                return 0;

        HWRM_PREP(&req, HWRM_CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = UINT64_MAX;
        if (l2_filter->l2_ref_cnt == 0) {
                vnic = l2_filter->vnic;
                if (vnic) {
                        STAILQ_REMOVE(&vnic->filter, l2_filter,
                                      bnxt_filter_info, next);
                        bnxt_free_filter(bp, l2_filter);
                }
        }

        return 0;
}

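/*
 * Several flows can share one underlying L2 filter via
 * matching_l2_fltr_ptr; the HWRM free above is only issued once the
 * shared reference count drops to zero.
 */
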
int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        /* TODO: Is there a better way to add VLANs to each VNIC in case of
         * VMDQ?
         */
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                PMD_DRV_LOG(DEBUG,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(&req, HWRM_CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);

        /* PMD does not support XDP and RoCE */
        filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_L2;
        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;
        if (filter->pri_hint) {
                req.pri_hint = filter->pri_hint;
                req.l2_filter_id_hint =
                        rte_cpu_to_le_64(filter->l2_filter_id_hint);
        }

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
        HWRM_UNLOCK();

        filter->l2_ref_cnt++;

        return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(&req, HWRM_PORT_MAC_CFG, BNXT_USE_CHIMP_MB);

        if (ptp->rx_filter)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables = rte_cpu_to_le_32
                (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

        if (ptp)
                return 0;

        HWRM_PREP(&req, HWRM_PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);

        req.port_id = rte_cpu_to_le_16(bp->pf->port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (!BNXT_CHIP_P5(bp) &&
            !(resp->flags &
              HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) {
                HWRM_UNLOCK();
                return 0;
        }

        if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
                bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }

        if (!BNXT_CHIP_P5(bp)) {
                ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
                ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
                ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
                ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
                ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
                ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
                ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
                ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
                ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
        }

        /* Done with the response buffer; release the HWRM lock on all
         * paths, per the HWRM_UNLOCK contract above.
         */
        HWRM_UNLOCK();

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        return 0;
}

void bnxt_hwrm_free_vf_info(struct bnxt *bp)
{
        int i;

        for (i = 0; i < bp->pf->max_vfs; i++) {
                rte_free(bp->pf->vf_info[i].vlan_table);
                bp->pf->vf_info[i].vlan_table = NULL;
                rte_free(bp->pf->vf_info[i].vlan_as_table);
                bp->pf->vf_info[i].vlan_as_table = NULL;
        }
        rte_free(bp->pf->vf_info);
        bp->pf->vf_info = NULL;
}

static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        int i;

        HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf->port_id = resp->port_id;
                bp->pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                bp->pf->total_vfs = rte_le_to_cpu_16(resp->max_vfs);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf->max_vfs) {
                        if (bp->pf->vf_info)
                                bnxt_hwrm_free_vf_info(bp);
                        bp->pf->vf_info = rte_zmalloc("bnxt_vf_info",
                            sizeof(bp->pf->vf_info[0]) * new_max_vfs, 0);
                        if (bp->pf->vf_info == NULL) {
                                PMD_DRV_LOG(ERR, "Alloc vf info fail\n");
                                HWRM_UNLOCK();
                                return -ENOMEM;
                        }
                        bp->pf->max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf->vf_info[i].fid =
                                        bp->pf->first_vf_id + i;
                                bp->pf->vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf->vf_info[i].vlan_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf->vf_info[i].vlan_table);
                                bp->pf->vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf->vf_info[i].vlan_as_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN AS table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                              bp->pf->vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf->vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        if (!bnxt_check_zero_bytes(resp->mac_address, RTE_ETHER_ADDR_LEN)) {
                bp->flags |= BNXT_FLAG_DFLT_MAC_SET;
                memcpy(bp->mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
        } else {
                bp->flags &= ~BNXT_FLAG_DFLT_MAC_SET;
        }
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
        bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
                bp->max_l2_ctx += bp->max_rx_em_flows;
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf->max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        PMD_DRV_LOG(DEBUG, "Max l2_cntxts is %d vnics is %d\n",
                    bp->max_l2_ctx, bp->max_vnics);
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
                bp->pf->total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
                }
        }

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
                bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
                bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
                PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
        }

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
                bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
                bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
                bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;

        HWRM_UNLOCK();

        /* Query the PTP configuration only after releasing the HWRM lock;
         * bnxt_hwrm_ptp_qcfg() issues its own HWRM command and takes the
         * lock itself.
         */
        if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
                bnxt_hwrm_ptp_qcfg(bp);

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc;

        rc = __bnxt_hwrm_func_qcaps(bp);
        if (rc)
                return rc;

        if (bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
                rc = bnxt_alloc_ctx_mem(bp);
                if (rc)
                        return rc;

                /* On older FW,
                 * bnxt_hwrm_func_resc_qcaps can fail and cause init failure.
                 * But the error can be ignored. Return success.
                 */
                rc = bnxt_hwrm_func_resc_qcaps(bp);
                if (!rc)
                        bp->flags |= BNXT_FLAG_NEW_RM;
        }

        return 0;
}

/* VNIC caps cover all VNICs, so there is no need to pass a vnic_id. */
int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
{
        int rc = 0;
        uint32_t flags;
        struct hwrm_vnic_qcaps_input req = {.req_type = 0 };
        struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(&req, HWRM_VNIC_QCAPS, BNXT_USE_CHIMP_MB);

        req.target_id = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        flags = rte_le_to_cpu_32(resp->flags);

        if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) {
                bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY;
                PMD_DRV_LOG(INFO, "CoS assignment capability enabled\n");
        }

        if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP)
                bp->vnic_cap_flags |= BNXT_VNIC_CAP_OUTER_RSS;

        bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(&req, HWRM_FUNC_RESET, BNXT_USE_CHIMP_MB);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        uint32_t flags = 0;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
                flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
        if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;

        /* PFs and trusted VFs should indicate support for the master
         * capability on non-Stingray platforms.
         */
        if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;

        HWRM_PREP(&req, HWRM_FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf->vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf->vf_req_fwd)));
        }

        req.flags = rte_cpu_to_le_32(flags);

        req.async_event_fwd[0] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
        if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
                req.async_event_fwd[0] |=
                        rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
        req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
                                 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
        if (BNXT_PF(bp))
                req.async_event_fwd[1] |=
                        rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION);

        if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))
                req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        flags = rte_le_to_cpu_32(resp->flags);
        if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
                bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;

        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
{
        if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
                return 0;

        return bnxt_hwrm_func_reserve_vf_resc(bp, true);
}

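/*
 * When "test" is true, the *_ASSETS_TEST flags below request what amounts
 * to a dry run: the FW checks whether the requested resources are
 * available without committing the reservation. This is how
 * bnxt_hwrm_check_vf_rings() uses it.
 */
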
int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
{
        int rc;
        uint32_t flags = 0;
        uint32_t enables;
        struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_vf_cfg_input req = {0};

        HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);

        enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;

        if (BNXT_HAS_RING_GRPS(bp)) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
                req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
        }

        req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
        req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
                                            AGG_RING_MULTIPLIER);
        req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
                                              bp->tx_nr_rings +
                                              BNXT_NUM_ASYNC_CPR(bp));
        req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
        if (bp->vf_resv_strategy ==
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
                           HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
                           HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
                req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
                req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
                req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
        } else if (bp->vf_resv_strategy ==
                   HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MAXIMAL) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
                req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
        }

        /* The ring group test flag is added separately below, only when
         * the device actually has HW ring groups.
         */
        if (test)
                flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;

        if (test && BNXT_HAS_RING_GRPS(bp))
                flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;

        req.flags = rte_cpu_to_le_32(flags);
        req.enables |= rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (test)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_resource_qcaps_input req = {0};

        HWRM_PREP(&req, HWRM_FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT_SILENT();

        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        /* func_resource_qcaps does not return max_rx_em_flows.
         * So use the value provided by func_qcaps.
         */
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
                bp->max_l2_ctx += bp->max_rx_em_flows;
        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
        bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
        if (bp->vf_resv_strategy >
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
                bp->vf_resv_strategy =
                HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        bp->hwrm_cmd_timeout = timeout;
        HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (bp->flags & BNXT_FLAG_FW_RESET)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
                resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
                resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
        bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
                     (resp->hwrm_fw_min_8b << 16) |
                     (resp->hwrm_fw_bld_8b << 8) |
                     resp->hwrm_fw_rsvd_8b;
        PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        fw_version = resp->hwrm_intf_maj_8b << 16;
        fw_version |= resp->hwrm_intf_min_8b << 8;
        fw_version |= resp->hwrm_intf_upd_8b;
        bp->hwrm_spec_code = fw_version;

        /* def_req_timeout value is in milliseconds */
        bp->hwrm_cmd_timeout = rte_le_to_cpu_16(resp->def_req_timeout);
        /* convert timeout to usec */
        bp->hwrm_cmd_timeout *= 1000;
        if (!bp->hwrm_cmd_timeout)
                bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;

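        /*
         * The poll loop in bnxt_hwrm_send_message() delays one usec per
         * iteration, so hwrm_cmd_timeout is effectively a usec budget
         * (e.g. a 500 ms def_req_timeout gives 500000 poll iterations).
         */
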
        if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                PMD_DRV_LOG(ERR, "Unsupported request length\n");
                rc = -EINVAL;
        }

        bp->chip_num = rte_le_to_cpu_16(resp->chip_num);

        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
        if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
                bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;

        max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT,
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                bp->hwrm_cmd_resp_dma_addr =
                        rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                PMD_DRV_LOG(DEBUG, "Short command supported\n");
                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

        if (((dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
             (dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
            bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
                sprintf(type, "bnxt_hwrm_short_" PCI_PRI_FMT,
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr =
                                rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
                bp->flags |= BNXT_FLAG_KONG_MB_EN;
                PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
                PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
                bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_MGMT;
                PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
        }

        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED) {
                PMD_DRV_LOG(DEBUG, "FW supports advanced flow counters\n");
                bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_COUNTERS;
        }

error:
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(&req, HWRM_FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

1247 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
1248 {
1249         int rc = 0;
1250         struct hwrm_port_phy_cfg_input req = {0};
1251         struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1252         uint32_t enables = 0;
1253
1254         HWRM_PREP(&req, HWRM_PORT_PHY_CFG, BNXT_USE_CHIMP_MB);
1255
1256         if (conf->link_up) {
1257                 /* Setting Fixed Speed. But AutoNeg is ON, So disable it */
1258                 if (bp->link_info->auto_mode && conf->link_speed) {
1259                         req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
1260                         PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
1261                 }
1262
1263                 req.flags = rte_cpu_to_le_32(conf->phy_flags);
1264                 /*
1265                  * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
1266                  * any auto mode, even "none".
1267                  */
1268                 if (!conf->link_speed) {
1269                         /* No speeds specified. Enable AutoNeg - all speeds */
1270                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
1271                         req.auto_mode =
1272                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
1273                 } else {
1274                         if (bp->link_info->link_signal_mode) {
1275                                 enables |=
1276                                 HWRM_PORT_PHY_CFG_IN_EN_FORCE_PAM4_LINK_SPEED;
1277                                 req.force_pam4_link_speed =
1278                                         rte_cpu_to_le_16(conf->link_speed);
1279                         } else {
1280                                 req.force_link_speed =
1281                                         rte_cpu_to_le_16(conf->link_speed);
1282                         }
1283                 }
1284                 /* AutoNeg - Advertise speeds specified. */
1285                 if (conf->auto_link_speed_mask &&
1286                     !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
1287                         req.auto_mode =
1288                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
1289                         req.auto_link_speed_mask =
1290                                 conf->auto_link_speed_mask;
1291                         if (conf->auto_pam4_link_speeds) {
1292                                 enables |=
1293                                 HWRM_PORT_PHY_CFG_IN_EN_AUTO_PAM4_LINK_SPD_MASK;
1294                                 req.auto_link_pam4_speed_mask =
1295                                         conf->auto_pam4_link_speeds;
1296                         } else {
1297                                 enables |=
1298                                 HWRM_PORT_PHY_CFG_IN_EN_AUTO_LINK_SPEED_MASK;
1299                         }
1300                 }
1301                 if (conf->auto_link_speed &&
1302                 !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE))
1303                         enables |=
1304                                 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
1305
1306                 req.auto_duplex = conf->duplex;
1307                 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
1308                 req.auto_pause = conf->auto_pause;
1309                 req.force_pause = conf->force_pause;
1310                 /* Set force_pause if there is no auto or if there is a force */
1311                 if (req.auto_pause && !req.force_pause)
1312                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
1313                 else
1314                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
1315
1316                 req.enables = rte_cpu_to_le_32(enables);
1317         } else {
1318                 req.flags =
1319                 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
1320                 PMD_DRV_LOG(INFO, "Force Link Down\n");
1321         }
1322
1323         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1324
1325         HWRM_CHECK_RESULT();
1326         HWRM_UNLOCK();
1327
1328         return rc;
1329 }
1330
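/*
 * Query the current PHY state via HWRM_PORT_PHY_QCFG and cache the link
 * status, speed, duplex, pause settings and PAM4 signaling information
 * in link_info.
 */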
1331 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
1332                                    struct bnxt_link_info *link_info)
1333 {
1334         int rc = 0;
1335         struct hwrm_port_phy_qcfg_input req = {0};
1336         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1337
1338         HWRM_PREP(&req, HWRM_PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);
1339
1340         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1341
1342         HWRM_CHECK_RESULT();
1343
1344         link_info->phy_link_status = resp->link;
1345         link_info->link_up =
1346                 (link_info->phy_link_status ==
1347                  HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
1348         link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
1349         link_info->duplex = resp->duplex_cfg;
1350         link_info->pause = resp->pause;
1351         link_info->auto_pause = resp->auto_pause;
1352         link_info->force_pause = resp->force_pause;
1353         link_info->auto_mode = resp->auto_mode;
1354         link_info->phy_type = resp->phy_type;
1355         link_info->media_type = resp->media_type;
1356
1357         link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
1358         link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
1359         link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
1360         link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
1361         link_info->phy_ver[0] = resp->phy_maj;
1362         link_info->phy_ver[1] = resp->phy_min;
1363         link_info->phy_ver[2] = resp->phy_bld;
1364         link_info->link_signal_mode =
1365                 rte_le_to_cpu_16(resp->active_fec_signal_mode);
1366         link_info->force_pam4_link_speed =
1367                         rte_le_to_cpu_16(resp->force_pam4_link_speed);
1368         link_info->support_pam4_speeds =
1369                         rte_le_to_cpu_16(resp->support_pam4_speeds);
1370         link_info->auto_pam4_link_speeds =
1371                         rte_le_to_cpu_16(resp->auto_pam4_link_speed_mask);
1372         HWRM_UNLOCK();
1373
1374         PMD_DRV_LOG(DEBUG, "Link Speed:%d,Auto:%d:%x:%x,Support:%x,Force:%x\n",
1375                     link_info->link_speed, link_info->auto_mode,
1376                     link_info->auto_link_speed, link_info->auto_link_speed_mask,
1377                     link_info->support_speeds, link_info->force_link_speed);
1378                 PMD_DRV_LOG(DEBUG, "Link Signal:%d,PAM4:Auto:%x,Support:%x,Force:%x\n",
1379                     link_info->link_signal_mode,
1380                     link_info->auto_pam4_link_speeds,
1381                     link_info->support_pam4_speeds,
1382                     link_info->force_pam4_link_speed);
1383         return rc;
1384 }
1385
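/*
 * Query the PHY capabilities (supported auto-negotiation speeds, both NRZ
 * and PAM4). Skipped for untrusted VFs, which do not control the port PHY.
 */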
1386 int bnxt_hwrm_port_phy_qcaps(struct bnxt *bp)
1387 {
1388         int rc = 0;
1389         struct hwrm_port_phy_qcaps_input req = {0};
1390         struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1391         struct bnxt_link_info *link_info = bp->link_info;
1392
1393         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
1394                 return 0;
1395
1396         HWRM_PREP(&req, HWRM_PORT_PHY_QCAPS, BNXT_USE_CHIMP_MB);
1397
1398         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1399
1400         HWRM_CHECK_RESULT();
1401
1402         bp->port_cnt = resp->port_cnt;
1403         if (resp->supported_speeds_auto_mode)
1404                 link_info->support_auto_speeds =
1405                         rte_le_to_cpu_16(resp->supported_speeds_auto_mode);
1406         if (resp->supported_pam4_speeds_auto_mode)
1407                 link_info->support_pam4_auto_speeds =
1408                         rte_le_to_cpu_16(resp->supported_pam4_speeds_auto_mode);
1409
1410         HWRM_UNLOCK();
1411
1412         return 0;
1413 }
1414
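/*
 * Scan the CoS queues from the highest index down for one with a LOSSY
 * service profile and use its id as the Tx CoS queue.
 */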
1415 static bool bnxt_find_lossy_profile(struct bnxt *bp)
1416 {
1417         int i = 0;
1418
1419         for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
1420                 if (bp->tx_cos_queue[i].profile ==
1421                     HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
1422                         bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
1423                         return true;
1424                 }
1425         }
1426         return false;
1427 }
1428
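/*
 * Fallback when no LOSSY profile exists: pick the highest-indexed queue
 * whose profile and id are both valid. Note: the UNKNOWN profile value
 * appears to double as the invalid queue id sentinel here.
 */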
1429 static void bnxt_find_first_valid_profile(struct bnxt *bp)
1430 {
1431         int i = 0;
1432
1433         for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
1434                 if (bp->tx_cos_queue[i].profile !=
1435                     HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN &&
1436                     bp->tx_cos_queue[i].id !=
1437                     HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN) {
1438                         bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
1439                         break;
1440                 }
1441         }
1442 }
1443
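/*
 * Query the CoS queue configuration via HWRM_QUEUE_QPORTCFG, first for
 * the Tx path and then for the Rx path, select the Tx CoS queue id(s)
 * to use and derive the max_tc/max_q limits from the response.
 */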
1444 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
1445 {
1446         int rc = 0;
1447         struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
1448         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
1449         uint32_t dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
1450         int i;
1451
1452 get_rx_info:
1453         HWRM_PREP(&req, HWRM_QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
1454
1455         req.flags = rte_cpu_to_le_32(dir);
1456         /* Set drv_qmap_cap on HWRM 1.9.1+ only if CoS classification is not required. */
1457         if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1 &&
1458             !(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
1459                 req.drv_qmap_cap =
1460                         HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
1461         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1462
1463         HWRM_CHECK_RESULT();
1464
1465         if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
1466                 GET_TX_QUEUE_INFO(0);
1467                 GET_TX_QUEUE_INFO(1);
1468                 GET_TX_QUEUE_INFO(2);
1469                 GET_TX_QUEUE_INFO(3);
1470                 GET_TX_QUEUE_INFO(4);
1471                 GET_TX_QUEUE_INFO(5);
1472                 GET_TX_QUEUE_INFO(6);
1473                 GET_TX_QUEUE_INFO(7);
1474         } else {
1475                 GET_RX_QUEUE_INFO(0);
1476                 GET_RX_QUEUE_INFO(1);
1477                 GET_RX_QUEUE_INFO(2);
1478                 GET_RX_QUEUE_INFO(3);
1479                 GET_RX_QUEUE_INFO(4);
1480                 GET_RX_QUEUE_INFO(5);
1481                 GET_RX_QUEUE_INFO(6);
1482                 GET_RX_QUEUE_INFO(7);
1483         }
1484
1485         HWRM_UNLOCK();
1486
1487         if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX)
1488                 goto done;
1489
1490         if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
1491                 bp->tx_cosq_id[0] = bp->tx_cos_queue[0].id;
1492         } else {
1493                 int j;
1494
1495                 /* iterate and find the COSq profile to use for Tx */
1496                 if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
1497                         for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
1498                                 if (bp->tx_cos_queue[i].id != 0xff)
1499                                         bp->tx_cosq_id[j++] =
1500                                                 bp->tx_cos_queue[i].id;
1501                         }
1502                 } else {
1503                         /* With CoS classification disabled, prefer a
1504                          * LOSSY profile for normal NIC operation. If
1505                          * none is found, fall back to the first valid profile.
1506                          */
1507                         if (!bnxt_find_lossy_profile(bp))
1508                                 bnxt_find_first_valid_profile(bp);
1509
1510                 }
1511         }
1512
1513         bp->max_tc = resp->max_configurable_queues;
1514         bp->max_lltc = resp->max_configurable_lossless_queues;
1515         if (bp->max_tc > BNXT_MAX_QUEUE)
1516                 bp->max_tc = BNXT_MAX_QUEUE;
1517         bp->max_q = bp->max_tc;
1518
1519         if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
1520                 dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX;
1521                 goto get_rx_info;
1522         }
1523
1524 done:
1525         return rc;
1526 }
1527
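/*
 * Allocate a HW ring of the given type via HWRM_RING_ALLOC, filling in
 * the type-specific bindings (completion ring, stats context, NQ, Rx
 * buffer size) and storing the returned fw_ring_id in the ring structure.
 */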
1528 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
1529                          struct bnxt_ring *ring,
1530                          uint32_t ring_type, uint32_t map_index,
1531                          uint32_t stats_ctx_id, uint32_t cmpl_ring_id,
1532                          uint16_t tx_cosq_id)
1533 {
1534         int rc = 0;
1535         uint32_t enables = 0;
1536         struct hwrm_ring_alloc_input req = {.req_type = 0 };
1537         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1538         struct rte_mempool *mb_pool;
1539         uint16_t rx_buf_size;
1540
1541         HWRM_PREP(&req, HWRM_RING_ALLOC, BNXT_USE_CHIMP_MB);
1542
1543         req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
1544         req.fbo = rte_cpu_to_le_32(0);
1545         /* Association of ring index with doorbell index */
1546         req.logical_id = rte_cpu_to_le_16(map_index);
1547         req.length = rte_cpu_to_le_32(ring->ring_size);
1548
1549         switch (ring_type) {
1550         case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1551                 req.ring_type = ring_type;
1552                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1553                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1554                 req.queue_id = rte_cpu_to_le_16(tx_cosq_id);
1555                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1556                         enables |=
1557                         HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1558                 break;
1559         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1560                 req.ring_type = ring_type;
1561                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1562                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
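                /* P5 FW expects the Rx buffer size: the usable mbuf data
                 * room (minus headroom), capped at BNXT_MAX_PKT_LEN.
                 */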
1563                 if (BNXT_CHIP_P5(bp)) {
1564                         mb_pool = bp->rx_queues[0]->mb_pool;
1565                         rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1566                                       RTE_PKTMBUF_HEADROOM;
1567                         rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1568                         req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1569                         enables |=
1570                                 HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
1571                 }
1572                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1573                         enables |=
1574                                 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1575                 break;
1576         case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1577                 req.ring_type = ring_type;
1578                 if (BNXT_HAS_NQ(bp)) {
1579                         /* Association of cp ring with nq */
1580                         req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1581                         enables |=
1582                                 HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
1583                 }
1584                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1585                 break;
1586         case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1587                 req.ring_type = ring_type;
1588                 req.page_size = BNXT_PAGE_SHFT;
1589                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1590                 break;
1591         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1592                 req.ring_type = ring_type;
1593                 req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);
1594
1595                 mb_pool = bp->rx_queues[0]->mb_pool;
1596                 rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1597                               RTE_PKTMBUF_HEADROOM;
1598                 rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1599                 req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1600
1601                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1602                 enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
1603                            HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
1604                            HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1605                 break;
1606         default:
1607                 PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
1608                         ring_type);
1609                 HWRM_UNLOCK();
1610                 return -EINVAL;
1611         }
1612         req.enables = rte_cpu_to_le_32(enables);
1613
1614         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1615
1616         if (rc || resp->error_code) {
1617                 if (rc == 0 && resp->error_code)
1618                         rc = rte_le_to_cpu_16(resp->error_code);
1619                 switch (ring_type) {
1620                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1621                         PMD_DRV_LOG(ERR,
1622                                 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
1623                         HWRM_UNLOCK();
1624                         return rc;
1625                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1626                         PMD_DRV_LOG(ERR,
1627                                     "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1628                         HWRM_UNLOCK();
1629                         return rc;
1630                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1631                         PMD_DRV_LOG(ERR,
1632                                     "hwrm_ring_alloc rx agg failed. rc:%d\n",
1633                                     rc);
1634                         HWRM_UNLOCK();
1635                         return rc;
1636                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1637                         PMD_DRV_LOG(ERR,
1638                                     "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1639                         HWRM_UNLOCK();
1640                         return rc;
1641                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1642                         PMD_DRV_LOG(ERR,
1643                                     "hwrm_ring_alloc nq failed. rc:%d\n", rc);
1644                         HWRM_UNLOCK();
1645                         return rc;
1646                 default:
1647                         PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1648                         HWRM_UNLOCK();
1649                         return rc;
1650                 }
1651         }
1652
1653         ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1654         HWRM_UNLOCK();
1655         return rc;
1656 }
1657
1658 int bnxt_hwrm_ring_free(struct bnxt *bp,
1659                         struct bnxt_ring *ring, uint32_t ring_type)
1660 {
1661         int rc;
1662         struct hwrm_ring_free_input req = {.req_type = 0 };
1663         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1664
1665         HWRM_PREP(&req, HWRM_RING_FREE, BNXT_USE_CHIMP_MB);
1666
1667         req.ring_type = ring_type;
1668         req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1669
1670         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1671
1672         if (rc || resp->error_code) {
1673                 if (rc == 0 && resp->error_code)
1674                         rc = rte_le_to_cpu_16(resp->error_code);
1675                 HWRM_UNLOCK();
1676
1677                 switch (ring_type) {
1678                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1679                         PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1680                                 rc);
1681                         return rc;
1682                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1683                         PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1684                                 rc);
1685                         return rc;
1686                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1687                         PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1688                                 rc);
1689                         return rc;
1690                 case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
1691                         PMD_DRV_LOG(ERR,
1692                                     "hwrm_ring_free nq failed. rc:%d\n", rc);
1693                         return rc;
1694                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
1695                         PMD_DRV_LOG(ERR,
1696                                     "hwrm_ring_free agg failed. rc:%d\n", rc);
1697                         return rc;
1698                 default:
1699                         PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1700                         return rc;
1701                 }
1702         }
1703         HWRM_UNLOCK();
1704         return 0;
1705 }
1706
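/*
 * Bind the completion, Rx and aggregation rings and the stats context of
 * the group at the given index into a HW ring group. Only meaningful on
 * chips that use ring groups.
 */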
1707 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1708 {
1709         int rc = 0;
1710         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1711         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1712
1713         HWRM_PREP(&req, HWRM_RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
1714
1715         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1716         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1717         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1718         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1719
1720         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1721
1722         HWRM_CHECK_RESULT();
1723
1724         bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id);
1725
1726         HWRM_UNLOCK();
1727
1728         return rc;
1729 }
1730
1731 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1732 {
1733         int rc;
1734         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1735         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1736
1737         HWRM_PREP(&req, HWRM_RING_GRP_FREE, BNXT_USE_CHIMP_MB);
1738
1739         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1740
1741         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1742
1743         HWRM_CHECK_RESULT();
1744         HWRM_UNLOCK();
1745
1746         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1747         return rc;
1748 }
1749
1750 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1751 {
1752         int rc = 0;
1753         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1754         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1755
1756         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1757                 return rc;
1758
1759         HWRM_PREP(&req, HWRM_STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
1760
1761         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1762
1763         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1764
1765         HWRM_CHECK_RESULT();
1766         HWRM_UNLOCK();
1767
1768         return rc;
1769 }
1770
1771 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1772                                 unsigned int idx __rte_unused)
1773 {
1774         int rc;
1775         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1776         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1777
1778         HWRM_PREP(&req, HWRM_STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1779
1780         req.update_period_ms = rte_cpu_to_le_32(0);
1781
1782         req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map);
1783
1784         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1785
1786         HWRM_CHECK_RESULT();
1787
1788         cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);
1789
1790         HWRM_UNLOCK();
1791
1792         return rc;
1793 }
1794
1795 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1796                                 unsigned int idx __rte_unused)
1797 {
1798         int rc;
1799         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1800         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1801
1802         HWRM_PREP(&req, HWRM_STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
1803
1804         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1805
1806         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1807
1808         HWRM_CHECK_RESULT();
1809         HWRM_UNLOCK();
1810
1811         return rc;
1812 }
1813
1814 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1815 {
1816         int rc = 0, i, j;
1817         struct hwrm_vnic_alloc_input req = { 0 };
1818         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1819
1820         if (!BNXT_HAS_RING_GRPS(bp))
1821                 goto skip_ring_grps;
1822
1823         /* map ring groups to this vnic */
1824         PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1825                 vnic->start_grp_id, vnic->end_grp_id);
1826         for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
1827                 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1828
1829         vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1830         vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1831         vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1832         vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1833
1834 skip_ring_grps:
1835         vnic->mru = BNXT_VNIC_MRU(bp->eth_dev->data->mtu);
1836         HWRM_PREP(&req, HWRM_VNIC_ALLOC, BNXT_USE_CHIMP_MB);
1837
1838         if (vnic->func_default)
1839                 req.flags =
1840                         rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1841         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1842
1843         HWRM_CHECK_RESULT();
1844
1845         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1846         HWRM_UNLOCK();
1847         PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1848         return rc;
1849 }
1850
1851 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1852                                         struct bnxt_vnic_info *vnic,
1853                                         struct bnxt_plcmodes_cfg *pmode)
1854 {
1855         int rc = 0;
1856         struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1857         struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1858
1859         HWRM_PREP(&req, HWRM_VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
1860
1861         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1862
1863         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1864
1865         HWRM_CHECK_RESULT();
1866
1867         pmode->flags = rte_le_to_cpu_32(resp->flags);
1868         /* dflt_vnic bit doesn't exist in the _cfg command */
1869         pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1870         pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1871         pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1872         pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1873
1874         HWRM_UNLOCK();
1875
1876         return rc;
1877 }
1878
1879 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1880                                        struct bnxt_vnic_info *vnic,
1881                                        struct bnxt_plcmodes_cfg *pmode)
1882 {
1883         int rc = 0;
1884         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1885         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1886
1887         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1888                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1889                 return rc;
1890         }
1891
1892         HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1893
1894         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1895         req.flags = rte_cpu_to_le_32(pmode->flags);
1896         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1897         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1898         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1899         req.enables = rte_cpu_to_le_32(
1900             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1901             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1902             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1903         );
1904
1905         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1906
1907         HWRM_CHECK_RESULT();
1908         HWRM_UNLOCK();
1909
1910         return rc;
1911 }
1912
1913 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1914 {
1915         int rc = 0;
1916         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1917         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1918         struct bnxt_plcmodes_cfg pmodes = { 0 };
1919         uint32_t ctx_enable_flag = 0;
1920         uint32_t enables = 0;
1921
1922         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1923                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1924                 return rc;
1925         }
1926
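        /* Snapshot the placement modes first; VNIC_CFG may reset them, so
         * they are restored after the VNIC has been reconfigured.
         */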
1927         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1928         if (rc)
1929                 return rc;
1930
1931         HWRM_PREP(&req, HWRM_VNIC_CFG, BNXT_USE_CHIMP_MB);
1932
1933         if (BNXT_CHIP_P5(bp)) {
1934                 int dflt_rxq = vnic->start_grp_id;
1935                 struct bnxt_rx_ring_info *rxr;
1936                 struct bnxt_cp_ring_info *cpr;
1937                 struct bnxt_rx_queue *rxq;
1938                 int i;
1939
1940                 /*
1941                  * The first active receive ring is used as the VNIC
1942                  * default receive ring. If there are no active receive
1943                  * rings (all corresponding receive queues are stopped),
1944                  * the first receive ring is used.
1945                  */
1946                 for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
1947                         rxq = bp->eth_dev->data->rx_queues[i];
1948                         if (rxq->rx_started) {
1949                                 dflt_rxq = i;
1950                                 break;
1951                         }
1952                 }
1953
1954                 rxq = bp->eth_dev->data->rx_queues[dflt_rxq];
1955                 rxr = rxq->rx_ring;
1956                 cpr = rxq->cp_ring;
1957
1958                 req.default_rx_ring_id =
1959                         rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
1960                 req.default_cmpl_ring_id =
1961                         rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
1962                 enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
1963                           HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
1964                 goto config_mru;
1965         }
1966
1967         /* Only RSS is supported for now; CoS and LB rules are TBD */
1968         enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
1969         if (vnic->lb_rule != 0xffff)
1970                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1971         if (vnic->cos_rule != 0xffff)
1972                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1973         if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
1974                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1975                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1976         }
1977         if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
1978                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID;
1979                 req.queue_id = rte_cpu_to_le_16(vnic->cos_queue_id);
1980         }
1981
1982         enables |= ctx_enable_flag;
1983         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1984         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1985         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1986         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1987
1988 config_mru:
1989         req.enables = rte_cpu_to_le_32(enables);
1990         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1991         req.mru = rte_cpu_to_le_16(vnic->mru);
1992         /* Configure default VNIC only once. */
1993         if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
1994                 req.flags |=
1995                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1996                 bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
1997         }
1998         if (vnic->vlan_strip)
1999                 req.flags |=
2000                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
2001         if (vnic->bd_stall)
2002                 req.flags |=
2003                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
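        /* The VNIC_QCFG output flag values used below match the VNIC_CFG
         * input flags bit-for-bit in the HSI definitions, hence their reuse.
         */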
2004         if (vnic->roce_dual)
2005                 req.flags |= rte_cpu_to_le_32(
2006                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
2007         if (vnic->roce_only)
2008                 req.flags |= rte_cpu_to_le_32(
2009                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
2010         if (vnic->rss_dflt_cr)
2011                 req.flags |= rte_cpu_to_le_32(
2012                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
2013
2014         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2015
2016         HWRM_CHECK_RESULT();
2017         HWRM_UNLOCK();
2018
2019         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
2020
2021         return rc;
2022 }
2023
2024 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
2025                 int16_t fw_vf_id)
2026 {
2027         int rc = 0;
2028         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
2029         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2030
2031         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2032                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
2033                 return rc;
2034         }
2035         HWRM_PREP(&req, HWRM_VNIC_QCFG, BNXT_USE_CHIMP_MB);
2036
2037         req.enables =
2038                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
2039         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2040         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
2041
2042         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2043
2044         HWRM_CHECK_RESULT();
2045
2046         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
2047         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
2048         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
2049         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
2050         vnic->mru = rte_le_to_cpu_16(resp->mru);
2051         vnic->func_default = rte_le_to_cpu_32(
2052                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
2053         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
2054                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
2055         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
2056                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
2057         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
2058                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
2059         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
2060                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
2061         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
2062                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
2063
2064         HWRM_UNLOCK();
2065
2066         return rc;
2067 }
2068
2069 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
2070                              struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
2071 {
2072         int rc = 0;
2073         uint16_t ctx_id;
2074         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
2075         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
2076                                                 bp->hwrm_cmd_resp_addr;
2077
2078         HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
2079
2080         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2081         HWRM_CHECK_RESULT();
2082
2083         ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
2084         if (!BNXT_HAS_RING_GRPS(bp))
2085                 vnic->fw_grp_ids[ctx_idx] = ctx_id;
2086         else if (ctx_idx == 0)
2087                 vnic->rss_rule = ctx_id;
2088
2089         HWRM_UNLOCK();
2090
2091         return rc;
2092 }
2093
2094 static
2095 int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
2096                              struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
2097 {
2098         int rc = 0;
2099         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
2100         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
2101                                                 bp->hwrm_cmd_resp_addr;
2102
2103         if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
2104                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
2105                 return rc;
2106         }
2107         HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
2108
2109         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
2110
2111         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2112
2113         HWRM_CHECK_RESULT();
2114         HWRM_UNLOCK();
2115
2116         return rc;
2117 }
2118
2119 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2120 {
2121         int rc = 0;
2122
2123         if (BNXT_CHIP_P5(bp)) {
2124                 int j;
2125
2126                 for (j = 0; j < vnic->num_lb_ctxts; j++) {
2127                         rc = _bnxt_hwrm_vnic_ctx_free(bp,
2128                                                       vnic,
2129                                                       vnic->fw_grp_ids[j]);
2130                         vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
2131                 }
2132                 vnic->num_lb_ctxts = 0;
2133         } else {
2134                 rc = _bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
2135                 vnic->rss_rule = INVALID_HW_RING_ID;
2136         }
2137
2138         return rc;
2139 }
2140
2141 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2142 {
2143         int rc = 0;
2144         struct hwrm_vnic_free_input req = {.req_type = 0 };
2145         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
2146
2147         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2148                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
2149                 return rc;
2150         }
2151
2152         HWRM_PREP(&req, HWRM_VNIC_FREE, BNXT_USE_CHIMP_MB);
2153
2154         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2155
2156         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2157
2158         HWRM_CHECK_RESULT();
2159         HWRM_UNLOCK();
2160
2161         vnic->fw_vnic_id = INVALID_HW_RING_ID;
2162         /* Configure default VNIC again if necessary. */
2163         if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
2164                 bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;
2165
2166         return rc;
2167 }
2168
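/*
 * On P5 chips RSS is configured per ring-table pair: one HWRM_VNIC_RSS_CFG
 * command is issued per context, each pointing at its own
 * HW_HASH_INDEX_SIZE-entry slice of the RSS table.
 */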
2169 static int
2170 bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2171 {
2172         int i;
2173         int rc = 0;
2174         int nr_ctxs = vnic->num_lb_ctxts;
2175         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2176         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2177
2178         for (i = 0; i < nr_ctxs; i++) {
2179                 HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2180
2181                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2182                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
2183                 req.hash_mode_flags = vnic->hash_mode;
2184
2185                 req.hash_key_tbl_addr =
2186                         rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2187
2188                 req.ring_grp_tbl_addr =
2189                         rte_cpu_to_le_64(vnic->rss_table_dma_addr +
2190                                          i * HW_HASH_INDEX_SIZE);
2191                 req.ring_table_pair_index = i;
2192                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
2193
2194                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
2195                                             BNXT_USE_CHIMP_MB);
2196
2197                 HWRM_CHECK_RESULT();
2198                 HWRM_UNLOCK();
2199         }
2200
2201         return rc;
2202 }
2203
2204 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
2205                            struct bnxt_vnic_info *vnic)
2206 {
2207         int rc = 0;
2208         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2209         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2210
2211         if (!vnic->rss_table)
2212                 return 0;
2213
2214         if (BNXT_CHIP_P5(bp))
2215                 return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
2216
2217         HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2218
2219         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
2220         req.hash_mode_flags = vnic->hash_mode;
2221
2222         req.ring_grp_tbl_addr =
2223             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
2224         req.hash_key_tbl_addr =
2225             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2226         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
2227         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2228
2229         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2230
2231         HWRM_CHECK_RESULT();
2232         HWRM_UNLOCK();
2233
2234         return rc;
2235 }
2236
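/*
 * Enable jumbo placement for the VNIC, with the threshold set to the
 * usable mbuf data size (data room minus headroom, capped at
 * BNXT_MAX_PKT_LEN).
 */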
2237 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
2238                         struct bnxt_vnic_info *vnic)
2239 {
2240         int rc = 0;
2241         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
2242         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2243         uint16_t size;
2244
2245         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2246                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
2247                 return rc;
2248         }
2249
2250         HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
2251
2252         req.flags = rte_cpu_to_le_32(
2253                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
2254
2255         req.enables = rte_cpu_to_le_32(
2256                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
2257
2258         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
2259         size -= RTE_PKTMBUF_HEADROOM;
2260         size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
2261
2262         req.jumbo_thresh = rte_cpu_to_le_16(size);
2263         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2264
2265         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2266
2267         HWRM_CHECK_RESULT();
2268         HWRM_UNLOCK();
2269
2270         return rc;
2271 }
2272
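/*
 * Enable or disable TPA (LRO) on the VNIC. P5 chips without TPA v2
 * support cannot do LRO, so -ENOTSUP is returned for them.
 */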
2273 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
2274                         struct bnxt_vnic_info *vnic, bool enable)
2275 {
2276         int rc = 0;
2277         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
2278         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2279
2280         if (BNXT_CHIP_P5(bp) && !bp->max_tpa_v2) {
2281                 if (enable)
2282                         PMD_DRV_LOG(ERR, "No HW support for LRO\n");
2283                 return -ENOTSUP;
2284         }
2285
2286         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2287                 PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
2288                 return 0;
2289         }
2290
2291         HWRM_PREP(&req, HWRM_VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
2292
2293         if (enable) {
2294                 req.enables = rte_cpu_to_le_32(
2295                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
2296                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
2297                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
2298                 req.flags = rte_cpu_to_le_32(
2299                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
2300                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
2301                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
2302                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
2303                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
2304                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
2305                 req.max_aggs = rte_cpu_to_le_16(BNXT_TPA_MAX_AGGS(bp));
2306                 req.max_agg_segs = rte_cpu_to_le_16(BNXT_TPA_MAX_SEGS(bp));
2307                 req.min_agg_len = rte_cpu_to_le_32(512);
2308         }
2309         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2310
2311         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2312
2313         HWRM_CHECK_RESULT();
2314         HWRM_UNLOCK();
2315
2316         return rc;
2317 }
2318
2319 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
2320 {
2321         struct hwrm_func_cfg_input req = {0};
2322         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2323         int rc;
2324
2325         req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
2326         req.enables = rte_cpu_to_le_32(
2327                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2328         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
2329         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
2330
2331         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
2332
2333         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2334         HWRM_CHECK_RESULT();
2335         HWRM_UNLOCK();
2336
2337         bp->pf->vf_info[vf].random_mac = false;
2338
2339         return rc;
2340 }
2341
2342 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
2343                                   uint64_t *dropped)
2344 {
2345         int rc = 0;
2346         struct hwrm_func_qstats_input req = {.req_type = 0};
2347         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2348
2349         HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2350
2351         req.fid = rte_cpu_to_le_16(fid);
2352
2353         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2354
2355         HWRM_CHECK_RESULT();
2356
2357         if (dropped)
2358                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
2359
2360         HWRM_UNLOCK();
2361
2362         return rc;
2363 }
2364
2365 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
2366                           struct rte_eth_stats *stats,
2367                           struct hwrm_func_qstats_output *func_qstats)
2368 {
2369         int rc = 0;
2370         struct hwrm_func_qstats_input req = {.req_type = 0};
2371         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2372
2373         HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2374
2375         req.fid = rte_cpu_to_le_16(fid);
2376
2377         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2378
2379         HWRM_CHECK_RESULT();
2380         if (func_qstats)
2381                 memcpy(func_qstats, resp,
2382                        sizeof(struct hwrm_func_qstats_output));
2383
2384         if (!stats)
2385                 goto exit;
2386
2387         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2388         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2389         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2390         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2391         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2392         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2393
2394         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2395         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2396         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2397         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2398         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2399         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2400
2401         stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
2402         stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
2403         stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
2404
2405 exit:
2406         HWRM_UNLOCK();
2407
2408         return rc;
2409 }
2410
2411 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
2412 {
2413         int rc = 0;
2414         struct hwrm_func_clr_stats_input req = {.req_type = 0};
2415         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2416
2417         HWRM_PREP(&req, HWRM_FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
2418
2419         req.fid = rte_cpu_to_le_16(fid);
2420
2421         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2422
2423         HWRM_CHECK_RESULT();
2424         HWRM_UNLOCK();
2425
2426         return rc;
2427 }
2428
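/*
 * Completion rings are indexed with the Rx rings first, in the range
 * [0, rx_cp_nr_rings), followed by the Tx rings; clear the HW stats
 * context of each one.
 */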
2429 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
2430 {
2431         unsigned int i;
2432         int rc = 0;
2433
2434         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2435                 struct bnxt_tx_queue *txq;
2436                 struct bnxt_rx_queue *rxq;
2437                 struct bnxt_cp_ring_info *cpr;
2438
2439                 if (i >= bp->rx_cp_nr_rings) {
2440                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2441                         cpr = txq->cp_ring;
2442                 } else {
2443                         rxq = bp->rx_queues[i];
2444                         cpr = rxq->cp_ring;
2445                 }
2446
2447                 rc = bnxt_hwrm_stat_clear(bp, cpr);
2448                 if (rc)
2449                         return rc;
2450         }
2451         return 0;
2452 }
2453
2454 static int
2455 bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
2456 {
2457         int rc;
2458         unsigned int i;
2459         struct bnxt_cp_ring_info *cpr;
2460
2461         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2462
2463                 if (i >= bp->rx_cp_nr_rings) {
2464                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
2465                 } else {
2466                         cpr = bp->rx_queues[i]->cp_ring;
2467                         if (BNXT_HAS_RING_GRPS(bp))
2468                                 bp->grp_info[i].fw_stats_ctx = -1;
2469                 }
2470                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
2471                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
2472                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
2473                         if (rc)
2474                                 return rc;
2475                 }
2476         }
2477         return 0;
2478 }
2479
2480 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
2481 {
2482         unsigned int i;
2483         int rc = 0;
2484
2485         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2486                 struct bnxt_tx_queue *txq;
2487                 struct bnxt_rx_queue *rxq;
2488                 struct bnxt_cp_ring_info *cpr;
2489
2490                 if (i >= bp->rx_cp_nr_rings) {
2491                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2492                         cpr = txq->cp_ring;
2493                 } else {
2494                         rxq = bp->rx_queues[i];
2495                         cpr = rxq->cp_ring;
2496                 }
2497
2498                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
2499
2500                 if (rc)
2501                         return rc;
2502         }
2503         return rc;
2504 }
2505
2506 static int
2507 bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
2508 {
2509         uint16_t idx;
2510         int rc = 0;
2511
2512         if (!BNXT_HAS_RING_GRPS(bp))
2513                 return 0;
2514
2515         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
2516
2517                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
2518                         continue;
2519
2520                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
2521
2522                 if (rc)
2523                         return rc;
2524         }
2525         return rc;
2526 }
2527
2528 void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2529 {
2530         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2531
2532         bnxt_hwrm_ring_free(bp, cp_ring,
2533                             HWRM_RING_FREE_INPUT_RING_TYPE_NQ);
2534         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2535         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2536                                      sizeof(*cpr->cp_desc_ring));
2537         cpr->cp_raw_cons = 0;
2538         cpr->valid = 0;
2539 }
2540
2541 void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2542 {
2543         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2544
2545         bnxt_hwrm_ring_free(bp, cp_ring,
2546                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
2547         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2548         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2549                         sizeof(*cpr->cp_desc_ring));
2550         cpr->cp_raw_cons = 0;
2551         cpr->valid = 0;
2552 }
2553
2554 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
2555 {
2556         struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
2557         struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
2558         struct bnxt_ring *ring = rxr->rx_ring_struct;
2559         struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
2560
2561         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2562                 bnxt_hwrm_ring_free(bp, ring,
2563                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2564                 ring->fw_ring_id = INVALID_HW_RING_ID;
2565                 if (BNXT_HAS_RING_GRPS(bp))
2566                         bp->grp_info[queue_index].rx_fw_ring_id =
2567                                                         INVALID_HW_RING_ID;
2568         }
2569         ring = rxr->ag_ring_struct;
2570         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2571                 bnxt_hwrm_ring_free(bp, ring,
2572                                     BNXT_CHIP_P5(bp) ?
2573                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
2574                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2575                 if (BNXT_HAS_RING_GRPS(bp))
2576                         bp->grp_info[queue_index].ag_fw_ring_id =
2577                                                         INVALID_HW_RING_ID;
2578         }
2579         if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
2580                 bnxt_free_cp_ring(bp, cpr);
2581
2582         if (BNXT_HAS_RING_GRPS(bp))
2583                 bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
2584 }
2585
2586 static int
2587 bnxt_free_all_hwrm_rings(struct bnxt *bp)
2588 {
2589         unsigned int i;
2590
2591         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
2592                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
2593                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
2594                 struct bnxt_ring *ring = txr->tx_ring_struct;
2595                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
2596
2597                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2598                         bnxt_hwrm_ring_free(bp, ring,
2599                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
2600                         ring->fw_ring_id = INVALID_HW_RING_ID;
2601                         memset(txr->tx_desc_ring, 0,
2602                                         txr->tx_ring_struct->ring_size *
2603                                         sizeof(*txr->tx_desc_ring));
2604                         memset(txr->tx_buf_ring, 0,
2605                                         txr->tx_ring_struct->ring_size *
2606                                         sizeof(*txr->tx_buf_ring));
2607                         txr->tx_raw_prod = 0;
2608                         txr->tx_raw_cons = 0;
2609                 }
2610                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
2611                         bnxt_free_cp_ring(bp, cpr);
2612                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
2613                 }
2614         }
2615
2616         for (i = 0; i < bp->rx_cp_nr_rings; i++)
2617                 bnxt_free_hwrm_rx_ring(bp, i);
2618
2619         return 0;
2620 }
2621
2622 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
2623 {
2624         uint16_t i;
2625         int rc = 0;
2626
2627         if (!BNXT_HAS_RING_GRPS(bp))
2628                 return 0;
2629
2630         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
2631                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
2632                 if (rc)
2633                         return rc;
2634         }
2635         return rc;
2636 }
2637
2638 /*
2639  * HWRM utility functions
2640  */
2641
2642 void bnxt_free_hwrm_resources(struct bnxt *bp)
2643 {
2644         /* Release HWRM command/response DMA buffers */
2645         rte_free(bp->hwrm_cmd_resp_addr);
2646         rte_free(bp->hwrm_short_cmd_req_addr);
2647         bp->hwrm_cmd_resp_addr = NULL;
2648         bp->hwrm_short_cmd_req_addr = NULL;
2649         bp->hwrm_cmd_resp_dma_addr = 0;
2650         bp->hwrm_short_cmd_req_dma_addr = 0;
2651 }
2652
2653 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2654 {
2655         struct rte_pci_device *pdev = bp->pdev;
2656         char type[RTE_MEMZONE_NAMESIZE];
2657
2658         snprintf(type, sizeof(type), "bnxt_hwrm_" PCI_PRI_FMT, pdev->addr.domain,
2659                  pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2660         bp->max_resp_len = HWRM_MAX_RESP_LEN;
2661         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
2662         if (bp->hwrm_cmd_resp_addr == NULL)
2663                 return -ENOMEM;
2664         bp->hwrm_cmd_resp_dma_addr =
2665                 rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
2666         if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
2667                 PMD_DRV_LOG(ERR,
2668                         "unable to map response address to physical memory\n");
2669                 return -ENOMEM;
2670         }
2671         rte_spinlock_init(&bp->hwrm_lock);
2672
2673         return 0;
2674 }
2675
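/*
 * EM and n-tuple filters reference an underlying L2 filter, so the
 * specific filter is cleared first and the L2 filter last.
 */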
2676 int
2677 bnxt_clear_one_vnic_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
2678 {
2679         int rc = 0;
2680
2681         if (filter->filter_type == HWRM_CFA_EM_FILTER) {
2682                 rc = bnxt_hwrm_clear_em_filter(bp, filter);
2683                 if (rc)
2684                         return rc;
2685         } else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
2686                 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2687                 if (rc)
2688                         return rc;
2689         }
2690
2691         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2692         return rc;
2693 }
2694
2695 static int
2696 bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2697 {
2698         struct bnxt_filter_info *filter;
2699         int rc = 0;
2700
2701         while ((filter = STAILQ_FIRST(&vnic->filter)) != NULL) {
2702                 rc = bnxt_clear_one_vnic_filter(bp, filter);
2703                 STAILQ_REMOVE_HEAD(&vnic->filter, next);
2704                 bnxt_free_filter(bp, filter);
2705         }
2706         return rc;
2707 }
2708
2709 static int
2710 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2711 {
2712         struct bnxt_filter_info *filter;
2713         struct rte_flow *flow;
2714         int rc = 0;
2715
2716         while (!STAILQ_EMPTY(&vnic->flow_list)) {
2717                 flow = STAILQ_FIRST(&vnic->flow_list);
2718                 filter = flow->filter;
2719                 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
2720                 rc = bnxt_clear_one_vnic_filter(bp, filter);
2721
2722                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2723                 rte_free(flow);
2724         }
2725         return rc;
2726 }
2727
2728 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2729 {
2730         struct bnxt_filter_info *filter;
2731         int rc = 0;
2732
2733         STAILQ_FOREACH(filter, &vnic->filter, next) {
2734                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2735                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2736                                                      filter);
2737                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2738                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2739                                                          filter);
2740                 else
2741                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2742                                                      filter);
2743                 if (rc)
2744                         break;
2745         }
2746         return rc;
2747 }
2748
2749 static void
2750 bnxt_free_tunnel_ports(struct bnxt *bp)
2751 {
2752         if (bp->vxlan_port_cnt)
2753                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2754                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2755
2756         if (bp->geneve_port_cnt)
2757                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2758                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2759 }
2760
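/*
 * Teardown below is leaf-first: per-VNIC flows, filters and RSS contexts
 * are released before the VNIC itself, and only then are the shared rings,
 * ring groups, stat contexts and tunnel ports freed, since each level
 * references the one below it.
 */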
2761 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2762 {
2763         int i;
2764
2765         if (bp->vnic_info == NULL)
2766                 return;
2767
2768         /*
2769          * Clean up VNICs in reverse order so that the L2 filter
2770          * belonging to vnic0 is the last to be released.
2771          */
2772         for (i = bp->max_vnics - 1; i >= 0; i--) {
2773                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2774
2775                 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
2776                         continue;
2777
2778                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2779
2780                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2781
2782                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2783
2784                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2785
2786                 bnxt_hwrm_vnic_free(bp, vnic);
2787
2788                 rte_free(vnic->fw_grp_ids);
2789         }
2790         /* Ring resources */
2791         bnxt_free_all_hwrm_rings(bp);
2792         bnxt_free_all_hwrm_ring_grps(bp);
2793         bnxt_free_all_hwrm_stat_ctxs(bp);
2794         bnxt_free_tunnel_ports(bp);
2795 }
2796
2797 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2798 {
2799         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2800
2801         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2802                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2803
2804         switch (conf_link_speed) {
2805         case ETH_LINK_SPEED_10M_HD:
2806         case ETH_LINK_SPEED_100M_HD:
2807                 /* FALLTHROUGH */
2808                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2809         }
2810         return hw_link_duplex;
2811 }
2812
2813 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2814 {
2815         return !conf_link;
2816 }
2817
2818 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
2819                                           uint16_t pam4_link)
2820 {
2821         uint16_t eth_link_speed = 0;
2822
2823         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2824                 return ETH_LINK_SPEED_AUTONEG;
2825
2826         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2827         case ETH_LINK_SPEED_100M:
2828         case ETH_LINK_SPEED_100M_HD:
2829                 /* FALLTHROUGH */
2830                 eth_link_speed =
2831                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2832                 break;
2833         case ETH_LINK_SPEED_1G:
2834                 eth_link_speed =
2835                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2836                 break;
2837         case ETH_LINK_SPEED_2_5G:
2838                 eth_link_speed =
2839                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2840                 break;
2841         case ETH_LINK_SPEED_10G:
2842                 eth_link_speed =
2843                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2844                 break;
2845         case ETH_LINK_SPEED_20G:
2846                 eth_link_speed =
2847                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2848                 break;
2849         case ETH_LINK_SPEED_25G:
2850                 eth_link_speed =
2851                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2852                 break;
2853         case ETH_LINK_SPEED_40G:
2854                 eth_link_speed =
2855                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2856                 break;
2857         case ETH_LINK_SPEED_50G:
2858                 eth_link_speed = pam4_link ?
2859                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB :
2860                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2861                 break;
2862         case ETH_LINK_SPEED_100G:
2863                 eth_link_speed = pam4_link ?
2864                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB :
2865                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2866                 break;
2867         case ETH_LINK_SPEED_200G:
2868                 eth_link_speed =
2869                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
2870                 break;
2871         default:
2872                 PMD_DRV_LOG(ERR,
2873                         "Unsupported link speed %d; default to AUTO\n",
2874                         conf_link_speed);
2875                 break;
2876         }
2877         return eth_link_speed;
2878 }
2879
2880 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2881                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2882                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2883                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \
2884                 ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G)
2885
2886 static int bnxt_validate_link_speed(struct bnxt *bp)
2887 {
2888         uint32_t link_speed = bp->eth_dev->data->dev_conf.link_speeds;
2889         uint16_t port_id = bp->eth_dev->data->port_id;
2890         uint32_t link_speed_capa;
2891         uint32_t one_speed;
2892
2893         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2894                 return 0;
2895
2896         link_speed_capa = bnxt_get_speed_capabilities(bp);
2897
2898         if (link_speed & ETH_LINK_SPEED_FIXED) {
2899                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2900
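                /*
                 * A fixed-speed request must name exactly one speed bit, so
                 * one_speed must be a power of two: e.g. 0x10 & 0x0f == 0 is
                 * accepted, while 0x18 & 0x17 == 0x10 (two bits set) is
                 * rejected below.
                 */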
2901                 if (one_speed & (one_speed - 1)) {
2902                         PMD_DRV_LOG(ERR,
2903                                 "Invalid advertised speeds (%u) for port %u\n",
2904                                 link_speed, port_id);
2905                         return -EINVAL;
2906                 }
2907                 if ((one_speed & link_speed_capa) != one_speed) {
2908                         PMD_DRV_LOG(ERR,
2909                                 "Unsupported advertised speed (%u) for port %u\n",
2910                                 link_speed, port_id);
2911                         return -EINVAL;
2912                 }
2913         } else {
2914                 if (!(link_speed & link_speed_capa)) {
2915                         PMD_DRV_LOG(ERR,
2916                                 "Unsupported advertised speeds (%u) for port %u\n",
2917                                 link_speed, port_id);
2918                         return -EINVAL;
2919                 }
2920         }
2921         return 0;
2922 }
2923
2924 static uint16_t
2925 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2926 {
2927         uint16_t ret = 0;
2928
2929         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2930                 if (bp->link_info->support_speeds)
2931                         return bp->link_info->support_speeds;
2932                 link_speed = BNXT_SUPPORTED_SPEEDS;
2933         }
2934
2935         if (link_speed & ETH_LINK_SPEED_100M)
2936                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2937         if (link_speed & ETH_LINK_SPEED_100M_HD)
2938                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2939         if (link_speed & ETH_LINK_SPEED_1G)
2940                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2941         if (link_speed & ETH_LINK_SPEED_2_5G)
2942                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2943         if (link_speed & ETH_LINK_SPEED_10G)
2944                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2945         if (link_speed & ETH_LINK_SPEED_20G)
2946                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2947         if (link_speed & ETH_LINK_SPEED_25G)
2948                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2949         if (link_speed & ETH_LINK_SPEED_40G)
2950                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2951         if (link_speed & ETH_LINK_SPEED_50G)
2952                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2953         if (link_speed & ETH_LINK_SPEED_100G)
2954                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2955         if (link_speed & ETH_LINK_SPEED_200G)
2956                 ret |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
2957         return ret;
2958 }
2959
2960 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2961 {
2962         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2963
2964         switch (hw_link_speed) {
2965         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2966                 eth_link_speed = ETH_SPEED_NUM_100M;
2967                 break;
2968         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2969                 eth_link_speed = ETH_SPEED_NUM_1G;
2970                 break;
2971         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2972                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2973                 break;
2974         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2975                 eth_link_speed = ETH_SPEED_NUM_10G;
2976                 break;
2977         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2978                 eth_link_speed = ETH_SPEED_NUM_20G;
2979                 break;
2980         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2981                 eth_link_speed = ETH_SPEED_NUM_25G;
2982                 break;
2983         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2984                 eth_link_speed = ETH_SPEED_NUM_40G;
2985                 break;
2986         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2987                 eth_link_speed = ETH_SPEED_NUM_50G;
2988                 break;
2989         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2990                 eth_link_speed = ETH_SPEED_NUM_100G;
2991                 break;
2992         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
2993                 eth_link_speed = ETH_SPEED_NUM_200G;
2994                 break;
2995         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2996         default:
2997                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2998                         hw_link_speed);
2999                 break;
3000         }
3001         return eth_link_speed;
3002 }
3003
3004 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
3005 {
3006         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
3007
3008         switch (hw_link_duplex) {
3009         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
3010         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
3011                 /* FALLTHROUGH */
3012                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
3013                 break;
3014         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
3015                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
3016                 break;
3017         default:
3018                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
3019                         hw_link_duplex);
3020                 break;
3021         }
3022         return eth_link_duplex;
3023 }
3024
3025 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
3026 {
3027         int rc = 0;
3028         struct bnxt_link_info *link_info = bp->link_info;
3029
3030         rc = bnxt_hwrm_port_phy_qcaps(bp);
3031         if (rc)
3032                 PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc);
3033
3034         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
3035         if (rc) {
3036                 PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc);
3037                 goto exit;
3038         }
3039
3040         if (link_info->link_speed)
3041                 link->link_speed =
3042                         bnxt_parse_hw_link_speed(link_info->link_speed);
3043         else
3044                 link->link_speed = ETH_SPEED_NUM_NONE;
3045         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
3046         link->link_status = link_info->link_up;
3047         link->link_autoneg = link_info->auto_mode ==
3048                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
3049                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
3050 exit:
3051         return rc;
3052 }
3053
3054 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
3055 {
3056         int rc = 0;
3057         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
3058         struct bnxt_link_info link_req;
3059         uint16_t speed, autoneg;
3060
3061         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
3062                 return 0;
3063
3064         rc = bnxt_validate_link_speed(bp);
3065         if (rc)
3066                 goto error;
3067
3068         memset(&link_req, 0, sizeof(link_req));
3069         link_req.link_up = link_up;
3070         if (!link_up)
3071                 goto port_phy_cfg;
3072
3073         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
3074         if (BNXT_CHIP_P5(bp) &&
3075             dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
3076                 /* 40G is not supported as part of media auto detect.
3077                  * The speed should be forced and autoneg disabled
3078                  * to configure 40G speed.
3079                  */
3080                 PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n");
3081                 autoneg = 0;
3082         }
3083
3084         /* No auto speeds and no auto_pam4_link. Disable autoneg */
3085         if (bp->link_info->auto_link_speed == 0 &&
3086             bp->link_info->link_signal_mode &&
3087             bp->link_info->auto_pam4_link_speeds == 0)
3088                 autoneg = 0;
3089
3090         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds,
3091                                           bp->link_info->link_signal_mode);
3092         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
3093         /* Autoneg can be done only when the FW allows.
3094          * When user configures fixed speed of 40G and later changes to
3095          * any other speed, auto_link_speed/force_link_speed is still set
3096          * to 40G until link comes up at new speed.
3097          */
3098         if (autoneg == 1 &&
3099             !(!BNXT_CHIP_P5(bp) &&
3100               (bp->link_info->auto_link_speed ||
3101                bp->link_info->force_link_speed))) {
3102                 link_req.phy_flags |=
3103                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
3104                 link_req.auto_link_speed_mask =
3105                         bnxt_parse_eth_link_speed_mask(bp,
3106                                                        dev_conf->link_speeds);
3107         } else {
3108                 if (bp->link_info->phy_type ==
3109                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
3110                     bp->link_info->phy_type ==
3111                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
3112                     bp->link_info->media_type ==
3113                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
3114                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
3115                         return -EINVAL;
3116                 }
3117
3118                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
3119                 /* If user wants a particular speed try that first. */
3120                 if (speed)
3121                         link_req.link_speed = speed;
3122                 else if (bp->link_info->force_pam4_link_speed)
3123                         link_req.link_speed =
3124                                 bp->link_info->force_pam4_link_speed;
3125                 else if (bp->link_info->auto_pam4_link_speeds)
3126                         link_req.link_speed =
3127                                 bp->link_info->auto_pam4_link_speeds;
3128                 else if (bp->link_info->support_pam4_speeds)
3129                         link_req.link_speed =
3130                                 bp->link_info->support_pam4_speeds;
3131                 else if (bp->link_info->force_link_speed)
3132                         link_req.link_speed = bp->link_info->force_link_speed;
3133                 else
3134                         link_req.link_speed = bp->link_info->auto_link_speed;
3135                 /* Auto PAM4 link speed is zero, but auto_link_speed is not
3136                  * zero. Use the auto_link_speed.
3137                  */
3138                 if (bp->link_info->auto_link_speed != 0 &&
3139                     bp->link_info->auto_pam4_link_speeds == 0)
3140                         link_req.link_speed = bp->link_info->auto_link_speed;
3141         }
3142         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
3143         link_req.auto_pause = bp->link_info->auto_pause;
3144         link_req.force_pause = bp->link_info->force_pause;
3145
3146 port_phy_cfg:
3147         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
3148         if (rc) {
3149                 PMD_DRV_LOG(ERR,
3150                         "Set link config failed with rc %d\n", rc);
3151         }
3152
3153 error:
3154         return rc;
3155 }
3156
3157 /* JIRA 22088 */
3158 int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
3159 {
3160         struct hwrm_func_qcfg_input req = {0};
3161         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3162         uint16_t flags;
3163         uint16_t svif_info;
3164         int rc = 0;
3165         bp->func_svif = BNXT_SVIF_INVALID;
3166
3167         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3168         req.fid = rte_cpu_to_le_16(0xffff);
3169
3170         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3171
3172         HWRM_CHECK_RESULT();
3173
3174         /* Hard-coded 12-bit VLAN ID mask (0xfff) */
3175         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
3176
3177         svif_info = rte_le_to_cpu_16(resp->svif_info);
3178         if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
3179                 bp->func_svif = svif_info &
3180                                      HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
3181
3182         flags = rte_le_to_cpu_16(resp->flags);
3183         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
3184                 bp->flags |= BNXT_FLAG_MULTI_HOST;
3185
3186         if (BNXT_VF(bp) &&
3187             !BNXT_VF_IS_TRUSTED(bp) &&
3188             (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3189                 bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
3190                 PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
3191         } else if (BNXT_VF(bp) &&
3192                    BNXT_VF_IS_TRUSTED(bp) &&
3193                    !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3194                 bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
3195                 PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
3196         }
3197
3198         if (mtu)
3199                 *mtu = rte_le_to_cpu_16(resp->mtu);
3200
3201         switch (resp->port_partition_type) {
3202         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
3203         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
3204         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
3205                 /* FALLTHROUGH */
3206                 bp->flags |= BNXT_FLAG_NPAR_PF;
3207                 break;
3208         default:
3209                 bp->flags &= ~BNXT_FLAG_NPAR_PF;
3210                 break;
3211         }
3212
3213         bp->legacy_db_size =
3214                 rte_le_to_cpu_16(resp->legacy_l2_db_size_kb) * 1024;
3215
3216         HWRM_UNLOCK();
3217
3218         return rc;
3219 }
3220
3221 int bnxt_hwrm_parent_pf_qcfg(struct bnxt *bp)
3222 {
3223         struct hwrm_func_qcfg_input req = {0};
3224         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3225         int rc;
3226
3227         if (!BNXT_VF_IS_TRUSTED(bp))
3228                 return 0;
3229
3230         if (!bp->parent)
3231                 return -EINVAL;
3232
3233         bp->parent->fid = BNXT_PF_FID_INVALID;
3234
3235         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3236
3237         req.fid = rte_cpu_to_le_16(0xfffe); /* Request parent PF information. */
3238
3239         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3240
3241         HWRM_CHECK_RESULT();
3242
3243         memcpy(bp->parent->mac_addr, resp->mac_address, RTE_ETHER_ADDR_LEN);
3244         bp->parent->vnic = rte_le_to_cpu_16(resp->dflt_vnic_id);
3245         bp->parent->fid = rte_le_to_cpu_16(resp->fid);
3246         bp->parent->port_id = rte_le_to_cpu_16(resp->port_id);
3247
3248         /* FIXME: Temporary workaround - remove when firmware issue is fixed. */
3249         if (bp->parent->vnic == 0) {
3250                 PMD_DRV_LOG(ERR, "Error: parent VNIC unavailable.\n");
3251                 /* Use hard-coded values appropriate for current Wh+ fw. */
3252                 if (bp->parent->fid == 2)
3253                         bp->parent->vnic = 0x100;
3254                 else
3255                         bp->parent->vnic = 1;
3256         }
3257
3258         HWRM_UNLOCK();
3259
3260         return 0;
3261 }
3262
3263 int bnxt_hwrm_get_dflt_vnic_svif(struct bnxt *bp, uint16_t fid,
3264                                  uint16_t *vnic_id, uint16_t *svif)
3265 {
3266         struct hwrm_func_qcfg_input req = {0};
3267         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3268         uint16_t svif_info;
3269         int rc = 0;
3270
3271         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3272         req.fid = rte_cpu_to_le_16(fid);
3273
3274         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3275
3276         HWRM_CHECK_RESULT();
3277
3278         if (vnic_id)
3279                 *vnic_id = rte_le_to_cpu_16(resp->dflt_vnic_id);
3280
3281         svif_info = rte_le_to_cpu_16(resp->svif_info);
3282         if (svif && (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID))
3283                 *svif = svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
3284
3285         HWRM_UNLOCK();
3286
3287         return rc;
3288 }
3289
3290 int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp)
3291 {
3292         struct hwrm_port_mac_qcfg_input req = {0};
3293         struct hwrm_port_mac_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3294         uint16_t port_svif_info;
3295         int rc;
3296
3297         bp->port_svif = BNXT_SVIF_INVALID;
3298
3299         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
3300                 return 0;
3301
3302         HWRM_PREP(&req, HWRM_PORT_MAC_QCFG, BNXT_USE_CHIMP_MB);
3303
3304         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3305
3306         HWRM_CHECK_RESULT_SILENT();
3307
3308         port_svif_info = rte_le_to_cpu_16(resp->port_svif_info);
3309         if (port_svif_info &
3310             HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_VALID)
3311                 bp->port_svif = port_svif_info &
3312                         HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_MASK;
3313
3314         HWRM_UNLOCK();
3315
3316         return 0;
3317 }
3318
3319 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,
3320                                  struct bnxt_pf_resource_info *pf_resc)
3321 {
3322         struct hwrm_func_cfg_input req = {0};
3323         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3324         uint32_t enables;
3325         int rc;
3326
3327         enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
3328                   HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3329                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3330                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3331                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3332                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3333                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3334                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3335                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;
3336
3337         if (BNXT_HAS_RING_GRPS(bp)) {
3338                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
3339                 req.num_hw_ring_grps =
3340                         rte_cpu_to_le_16(pf_resc->num_hw_ring_grps);
3341         } else if (BNXT_HAS_NQ(bp)) {
3342                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
3343                 req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
3344         }
3345
3346         req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
3347         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
3348         req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3349         req.num_rsscos_ctxs = rte_cpu_to_le_16(pf_resc->num_rsscos_ctxs);
3350         req.num_stat_ctxs = rte_cpu_to_le_16(pf_resc->num_stat_ctxs);
3351         req.num_cmpl_rings = rte_cpu_to_le_16(pf_resc->num_cp_rings);
3352         req.num_tx_rings = rte_cpu_to_le_16(pf_resc->num_tx_rings);
3353         req.num_rx_rings = rte_cpu_to_le_16(pf_resc->num_rx_rings);
3354         req.num_l2_ctxs = rte_cpu_to_le_16(pf_resc->num_l2_ctxs);
3355         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
3356         req.fid = rte_cpu_to_le_16(0xffff);
3357         req.enables = rte_cpu_to_le_32(enables);
3358
3359         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3360
3361         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3362
3363         HWRM_CHECK_RESULT();
3364         HWRM_UNLOCK();
3365
3366         return rc;
3367 }
3368
3369 /* min values are the guaranteed resources and max values are subject
3370  * to availability. The strategy for now is to keep both min & max
3371  * values the same.
3372  */
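/*
 * Example with hypothetical numbers: for num_vfs == 7 and
 * bp->max_cmpl_rings == 64, every function's min and max completion-ring
 * counts are both 64 / (7 + 1) = 8, i.e. a fully guaranteed allocation
 * rather than a best-effort one.
 */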
3373 static void
3374 bnxt_fill_vf_func_cfg_req_new(struct bnxt *bp,
3375                               struct hwrm_func_vf_resource_cfg_input *req,
3376                               int num_vfs)
3377 {
3378         req->max_rsscos_ctx = rte_cpu_to_le_16(bp->max_rsscos_ctx /
3379                                                (num_vfs + 1));
3380         req->min_rsscos_ctx = req->max_rsscos_ctx;
3381         req->max_stat_ctx = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
3382         req->min_stat_ctx = req->max_stat_ctx;
3383         req->max_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
3384                                                (num_vfs + 1));
3385         req->min_cmpl_rings = req->max_cmpl_rings;
3386         req->max_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
3387         req->min_tx_rings = req->max_tx_rings;
3388         req->max_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
3389         req->min_rx_rings = req->max_rx_rings;
3390         req->max_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
3391         req->min_l2_ctxs = req->max_l2_ctxs;
3392         /* TODO: For now, do not support VMDq/RFS on VFs. */
3393         req->max_vnics = rte_cpu_to_le_16(1);
3394         req->min_vnics = req->max_vnics;
3395         req->max_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
3396                                                  (num_vfs + 1));
3397         req->min_hw_ring_grps = req->max_hw_ring_grps;
3398         req->flags =
3399          rte_cpu_to_le_16(HWRM_FUNC_VF_RESOURCE_CFG_INPUT_FLAGS_MIN_GUARANTEED);
3400 }
3401
3402 static void
3403 bnxt_fill_vf_func_cfg_req_old(struct bnxt *bp,
3404                               struct hwrm_func_cfg_input *req,
3405                               int num_vfs)
3406 {
3407         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
3408                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3409                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3410                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3411                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3412                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3413                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3414                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3415                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
3416                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
3417
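        /*
         * The MTU programmed here is the full frame budget. For example,
         * an L3 MTU of 1500 becomes 1500 + 14 (Ethernet header) + 4 (CRC)
         * + 2 * 4 (stacked VLAN tags) = 1526, assuming VLAN_TAG_SIZE == 4
         * and BNXT_NUM_VLANS == 2.
         */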
3418         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
3419                                     RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
3420                                     BNXT_NUM_VLANS);
3421         req->mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3422         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
3423                                                 (num_vfs + 1));
3424         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
3425         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
3426                                                (num_vfs + 1));
3427         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
3428         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
3429         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
3430         /* TODO: For now, do not support VMDq/RFS on VFs. */
3431         req->num_vnics = rte_cpu_to_le_16(1);
3432         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
3433                                                  (num_vfs + 1));
3434 }
3435
3436 /* Update the port wide resource values based on how many resources
3437  * got allocated to the VF.
3438  */
3439 static int bnxt_update_max_resources(struct bnxt *bp,
3440                                      int vf)
3441 {
3442         struct hwrm_func_qcfg_input req = {0};
3443         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3444         int rc;
3445
3446         /* Get the actual allocated values now */
3447         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3448         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3449         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3450         HWRM_CHECK_RESULT();
3451
3452         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
3453         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->alloc_stat_ctx);
3454         bp->max_cp_rings -= rte_le_to_cpu_16(resp->alloc_cmpl_rings);
3455         bp->max_tx_rings -= rte_le_to_cpu_16(resp->alloc_tx_rings);
3456         bp->max_rx_rings -= rte_le_to_cpu_16(resp->alloc_rx_rings);
3457         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->alloc_l2_ctx);
3458         bp->max_ring_grps -= rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
3459
3460         HWRM_UNLOCK();
3461
3462         return 0;
3463 }
3464
3465 /* Update the PF resource values based on how many resources
3466  * got allocated to it.
3467  */
3468 static int bnxt_update_max_resources_pf_only(struct bnxt *bp)
3469 {
3470         struct hwrm_func_qcfg_input req = {0};
3471         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3472         int rc;
3473
3474         /* Get the actual allocated values now */
3475         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3476         req.fid = rte_cpu_to_le_16(0xffff);
3477         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3478         HWRM_CHECK_RESULT();
3479
3480         bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
3481         bp->max_stat_ctx = rte_le_to_cpu_16(resp->alloc_stat_ctx);
3482         bp->max_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
3483         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
3484         bp->max_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
3485         bp->max_l2_ctx = rte_le_to_cpu_16(resp->alloc_l2_ctx);
3486         bp->max_ring_grps = rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
3487         bp->max_vnics = rte_le_to_cpu_16(resp->alloc_vnics);
3488
3489         HWRM_UNLOCK();
3490
3491         return 0;
3492 }
3493
3494 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
3495 {
3496         struct hwrm_func_qcfg_input req = {0};
3497         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3498         int rc;
3499
3500         /* Query the VF's currently configured default VLAN */
3501         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3502         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3503         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3504         HWRM_CHECK_RESULT();
3505         rc = rte_le_to_cpu_16(resp->vlan);
3506
3507         HWRM_UNLOCK();
3508
3509         return rc;
3510 }
3511
3512 static int bnxt_query_pf_resources(struct bnxt *bp,
3513                                    struct bnxt_pf_resource_info *pf_resc)
3514 {
3515         struct hwrm_func_qcfg_input req = {0};
3516         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3517         int rc;
3518
3519         /* Query the allocated resource counts and copy them into pf_resc */
3520         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3521         req.fid = rte_cpu_to_le_16(0xffff);
3522         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3523         HWRM_CHECK_RESULT();
3524
3525         pf_resc->num_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
3526         pf_resc->num_rsscos_ctxs = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
3527         pf_resc->num_stat_ctxs = rte_le_to_cpu_16(resp->alloc_stat_ctx);
3528         pf_resc->num_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
3529         pf_resc->num_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
3530         pf_resc->num_l2_ctxs = rte_le_to_cpu_16(resp->alloc_l2_ctx);
3531         pf_resc->num_hw_ring_grps = rte_le_to_cpu_32(resp->alloc_hw_ring_grps);
3532         bp->pf->evb_mode = resp->evb_mode;
3533
3534         HWRM_UNLOCK();
3535
3536         return rc;
3537 }
3538
3539 static void
3540 bnxt_calculate_pf_resources(struct bnxt *bp,
3541                             struct bnxt_pf_resource_info *pf_resc,
3542                             int num_vfs)
3543 {
3544         if (!num_vfs) {
3545                 pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx;
3546                 pf_resc->num_stat_ctxs = bp->max_stat_ctx;
3547                 pf_resc->num_cp_rings = bp->max_cp_rings;
3548                 pf_resc->num_tx_rings = bp->max_tx_rings;
3549                 pf_resc->num_rx_rings = bp->max_rx_rings;
3550                 pf_resc->num_l2_ctxs = bp->max_l2_ctx;
3551                 pf_resc->num_hw_ring_grps = bp->max_ring_grps;
3552
3553                 return;
3554         }
3555
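        /*
         * Each pool is divided evenly among the PF and the VFs, with the
         * PF also keeping the integer-division remainder. E.g. for 103 Tx
         * rings and 4 VFs, every function gets 103 / 5 = 20 rings and the
         * PF keeps 20 + 103 % 5 = 23.
         */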
3556         pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx / (num_vfs + 1) +
3557                                    bp->max_rsscos_ctx % (num_vfs + 1);
3558         pf_resc->num_stat_ctxs = bp->max_stat_ctx / (num_vfs + 1) +
3559                                  bp->max_stat_ctx % (num_vfs + 1);
3560         pf_resc->num_cp_rings = bp->max_cp_rings / (num_vfs + 1) +
3561                                 bp->max_cp_rings % (num_vfs + 1);
3562         pf_resc->num_tx_rings = bp->max_tx_rings / (num_vfs + 1) +
3563                                 bp->max_tx_rings % (num_vfs + 1);
3564         pf_resc->num_rx_rings = bp->max_rx_rings / (num_vfs + 1) +
3565                                 bp->max_rx_rings % (num_vfs + 1);
3566         pf_resc->num_l2_ctxs = bp->max_l2_ctx / (num_vfs + 1) +
3567                                bp->max_l2_ctx % (num_vfs + 1);
3568         pf_resc->num_hw_ring_grps = bp->max_ring_grps / (num_vfs + 1) +
3569                                     bp->max_ring_grps % (num_vfs + 1);
3570 }
3571
3572 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
3573 {
3574         struct bnxt_pf_resource_info pf_resc = { 0 };
3575         int rc;
3576
3577         if (!BNXT_PF(bp)) {
3578                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3579                 return -EINVAL;
3580         }
3581
3582         rc = bnxt_hwrm_func_qcaps(bp);
3583         if (rc)
3584                 return rc;
3585
3586         bnxt_calculate_pf_resources(bp, &pf_resc, 0);
3587
3588         bp->pf->func_cfg_flags &=
3589                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3590                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3591         bp->pf->func_cfg_flags |=
3592                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
3593
3594         rc = bnxt_hwrm_pf_func_cfg(bp, &pf_resc);
3595         if (rc)
3596                 return rc;
3597
3598         rc = bnxt_update_max_resources_pf_only(bp);
3599
3600         return rc;
3601 }
3602
3603 static int
3604 bnxt_configure_vf_req_buf(struct bnxt *bp, int num_vfs)
3605 {
3606         size_t req_buf_sz, sz;
3607         int i, rc;
3608
3609         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
3610         bp->pf->vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
3611                 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
3612         if (bp->pf->vf_req_buf == NULL)
3613                 return -ENOMEM;
3615
3616         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
3617                 rte_mem_lock_page(((char *)bp->pf->vf_req_buf) + sz);
3618
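        /*
         * Carve the buffer into one HWRM_MAX_REQ_LEN-sized slot per VF;
         * firmware deposits each VF's forwarded HWRM request into its slot
         * so the PF can inspect, execute or reject it.
         */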
3619         for (i = 0; i < num_vfs; i++)
3620                 bp->pf->vf_info[i].req_buf = ((char *)bp->pf->vf_req_buf) +
3621                                              (i * HWRM_MAX_REQ_LEN);
3622
3623         rc = bnxt_hwrm_func_buf_rgtr(bp, num_vfs);
3624         if (rc)
3625                 rte_free(bp->pf->vf_req_buf);
3626
3627         return rc;
3628 }
3629
3630 static int
3631 bnxt_process_vf_resc_config_new(struct bnxt *bp, int num_vfs)
3632 {
3633         struct hwrm_func_vf_resource_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3634         struct hwrm_func_vf_resource_cfg_input req = {0};
3635         int i, rc = 0;
3636
3637         bnxt_fill_vf_func_cfg_req_new(bp, &req, num_vfs);
3638         bp->pf->active_vfs = 0;
3639         for (i = 0; i < num_vfs; i++) {
3640                 HWRM_PREP(&req, HWRM_FUNC_VF_RESOURCE_CFG, BNXT_USE_CHIMP_MB);
3641                 req.vf_id = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
3642                 rc = bnxt_hwrm_send_message(bp,
3643                                             &req,
3644                                             sizeof(req),
3645                                             BNXT_USE_CHIMP_MB);
3646                 if (rc || resp->error_code) {
3647                         PMD_DRV_LOG(ERR,
3648                                 "Failed to initialize VF %d\n", i);
3649                         PMD_DRV_LOG(ERR,
3650                                 "Not all VFs available. (%d, %d)\n",
3651                                 rc, resp->error_code);
3652                         HWRM_UNLOCK();
3653
3654                         /* If the first VF configuration itself fails,
3655                          * unregister the vf_fwd_request buffer.
3656                          */
3657                         if (i == 0)
3658                                 bnxt_hwrm_func_buf_unrgtr(bp);
3659                         break;
3660                 }
3661                 HWRM_UNLOCK();
3662
3663                 /* Update the max resource values based on the resource values
3664                  * allocated to the VF.
3665                  */
3666                 bnxt_update_max_resources(bp, i);
3667                 bp->pf->active_vfs++;
3668                 bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
3669         }
3670
3671         return 0;
3672 }
3673
3674 static int
3675 bnxt_process_vf_resc_config_old(struct bnxt *bp, int num_vfs)
3676 {
3677         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3678         struct hwrm_func_cfg_input req = {0};
3679         int i, rc;
3680
3681         bnxt_fill_vf_func_cfg_req_old(bp, &req, num_vfs);
3682
3683         bp->pf->active_vfs = 0;
3684         for (i = 0; i < num_vfs; i++) {
3685                 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3686                 req.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags);
3687                 req.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
3688                 rc = bnxt_hwrm_send_message(bp,
3689                                             &req,
3690                                             sizeof(req),
3691                                             BNXT_USE_CHIMP_MB);
3692
3693                 /* Clear enable flag for next pass */
3694                 req.enables &= ~rte_cpu_to_le_32(
3695                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3696
3697                 if (rc || resp->error_code) {
3698                         PMD_DRV_LOG(ERR,
3699                                 "Failed to initialize VF %d\n", i);
3700                         PMD_DRV_LOG(ERR,
3701                                 "Not all VFs available. (%d, %d)\n",
3702                                 rc, resp->error_code);
3703                         HWRM_UNLOCK();
3704
3705                         /* If the first VF configuration itself fails,
3706                          * unregister the vf_fwd_request buffer.
3707                          */
3708                         if (i == 0)
3709                                 bnxt_hwrm_func_buf_unrgtr(bp);
3710                         break;
3711                 }
3712
3713                 HWRM_UNLOCK();
3714
3715                 /* Update the max resource values based on the resource values
3716                  * allocated to the VF.
3717                  */
3718                 bnxt_update_max_resources(bp, i);
3719                 bp->pf->active_vfs++;
3720                 bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
3721         }
3722
3723         return 0;
3724 }
3725
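/*
 * With the new resource manager (BNXT_FLAG_NEW_RM) the firmware accepts
 * per-VF min/max resource ranges via HWRM_FUNC_VF_RESOURCE_CFG; older
 * firmware only understands fixed counts through the legacy HWRM_FUNC_CFG
 * request.
 */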
3726 static void
3727 bnxt_configure_vf_resources(struct bnxt *bp, int num_vfs)
3728 {
3729         if (bp->flags & BNXT_FLAG_NEW_RM)
3730                 bnxt_process_vf_resc_config_new(bp, num_vfs);
3731         else
3732                 bnxt_process_vf_resc_config_old(bp, num_vfs);
3733 }
3734
3735 static void
3736 bnxt_update_pf_resources(struct bnxt *bp,
3737                          struct bnxt_pf_resource_info *pf_resc)
3738 {
3739         bp->max_rsscos_ctx = pf_resc->num_rsscos_ctxs;
3740         bp->max_stat_ctx = pf_resc->num_stat_ctxs;
3741         bp->max_cp_rings = pf_resc->num_cp_rings;
3742         bp->max_tx_rings = pf_resc->num_tx_rings;
3743         bp->max_rx_rings = pf_resc->num_rx_rings;
3744         bp->max_ring_grps = pf_resc->num_hw_ring_grps;
3745 }
3746
3747 static int32_t
3748 bnxt_configure_pf_resources(struct bnxt *bp,
3749                             struct bnxt_pf_resource_info *pf_resc)
3750 {
3751         /*
3752          * Enable STD_TX_RING_MODE, which limits the number of TX rings so
3753          * that QoS can function properly. Without it, the PF's TX rings
3754          * would not honor configured bandwidth settings.
3755          */
3756         bp->pf->func_cfg_flags &=
3757                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3758                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3759         bp->pf->func_cfg_flags |=
3760                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
3761         return bnxt_hwrm_pf_func_cfg(bp, pf_resc);
3762 }
3763
3764 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
3765 {
3766         struct bnxt_pf_resource_info pf_resc = { 0 };
3767         int rc;
3768
3769         if (!BNXT_PF(bp)) {
3770                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3771                 return -EINVAL;
3772         }
3773
3774         rc = bnxt_hwrm_func_qcaps(bp);
3775         if (rc)
3776                 return rc;
3777
3778         bnxt_calculate_pf_resources(bp, &pf_resc, num_vfs);
3779
3780         rc = bnxt_configure_pf_resources(bp, &pf_resc);
3781         if (rc)
3782                 return rc;
3783
3784         rc = bnxt_query_pf_resources(bp, &pf_resc);
3785         if (rc)
3786                 return rc;
3787
3788         /*
3789          * Now, create and register a buffer to hold forwarded VF requests
3790          */
3791         rc = bnxt_configure_vf_req_buf(bp, num_vfs);
3792         if (rc)
3793                 return rc;
3794
3795         bnxt_configure_vf_resources(bp, num_vfs);
3796
3797         bnxt_update_pf_resources(bp, &pf_resc);
3798
3799         return 0;
3800 }
3801
3802 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
3803 {
3804         struct hwrm_func_cfg_input req = {0};
3805         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3806         int rc;
3807
3808         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3809
3810         req.fid = rte_cpu_to_le_16(0xffff);
3811         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
3812         req.evb_mode = bp->pf->evb_mode;
3813
3814         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3815         HWRM_CHECK_RESULT();
3816         HWRM_UNLOCK();
3817
3818         return rc;
3819 }
3820
3821 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
3822                                 uint8_t tunnel_type)
3823 {
3824         struct hwrm_tunnel_dst_port_alloc_input req = {0};
3825         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3826         int rc = 0;
3827
3828         HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
3829         req.tunnel_type = tunnel_type;
3830         req.tunnel_dst_port_val = rte_cpu_to_be_16(port);
3831         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3832         HWRM_CHECK_RESULT();
3833
3834         switch (tunnel_type) {
3835         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
3836                 bp->vxlan_fw_dst_port_id =
3837                         rte_le_to_cpu_16(resp->tunnel_dst_port_id);
3838                 bp->vxlan_port = port;
3839                 break;
3840         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
3841                 bp->geneve_fw_dst_port_id =
3842                         rte_le_to_cpu_16(resp->tunnel_dst_port_id);
3843                 bp->geneve_port = port;
3844                 break;
3845         default:
3846                 break;
3847         }
3848
3849         HWRM_UNLOCK();
3850
3851         return rc;
3852 }
3853
3854 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
3855                                 uint8_t tunnel_type)
3856 {
3857         struct hwrm_tunnel_dst_port_free_input req = {0};
3858         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
3859         int rc = 0;
3860
3861         HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
3862
3863         req.tunnel_type = tunnel_type;
3864         req.tunnel_dst_port_id = rte_cpu_to_le_16(port);
3865         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3866
3867         HWRM_CHECK_RESULT();
3868         HWRM_UNLOCK();
3869
3870         if (tunnel_type ==
3871             HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN) {
3872                 bp->vxlan_port = 0;
3873                 bp->vxlan_port_cnt = 0;
3874         }
3875
3876         if (tunnel_type ==
3877             HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE) {
3878                 bp->geneve_port = 0;
3879                 bp->geneve_port_cnt = 0;
3880         }
3881
3882         return rc;
3883 }
3884
3885 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
3886                                         uint32_t flags)
3887 {
3888         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3889         struct hwrm_func_cfg_input req = {0};
3890         int rc;
3891
3892         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3893
3894         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3895         req.flags = rte_cpu_to_le_32(flags);
3896         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3897
3898         HWRM_CHECK_RESULT();
3899         HWRM_UNLOCK();
3900
3901         return rc;
3902 }
3903
3904 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
3905 {
3906         uint32_t *flag = flagp;
3907
3908         vnic->flags = *flag;
3909 }
3910
3911 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3912 {
3913         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
3914 }
3915
3916 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp, int num_vfs)
3917 {
3918         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
3919         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
3920         int rc;
3921
3922         HWRM_PREP(&req, HWRM_FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
3923
3924         req.req_buf_num_pages = rte_cpu_to_le_16(1);
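        /*
         * req_buf_page_size carries the log2 of the backing area. E.g. for
         * 4 VFs at 128 B per request (assuming HWRM_MAX_REQ_LEN == 128),
         * 512 B rounds up to 2^12 and page_getenum() returns 12.
         */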
3925         req.req_buf_page_size =
3926                 rte_cpu_to_le_16(page_getenum(num_vfs * HWRM_MAX_REQ_LEN));
3927         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
3928         req.req_buf_page_addr0 =
3929                 rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf));
3930         if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
3931                 PMD_DRV_LOG(ERR,
3932                         "unable to map buffer address to physical memory\n");
3933                 HWRM_UNLOCK();
3934                 return -ENOMEM;
3935         }
3936
3937         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3938
3939         HWRM_CHECK_RESULT();
3940         HWRM_UNLOCK();
3941
3942         return rc;
3943 }
3944
3945 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
3946 {
3947         int rc = 0;
3948         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
3949         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
3950
3951         if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
3952                 return 0;
3953
3954         HWRM_PREP(&req, HWRM_FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
3955
3956         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3957
3958         HWRM_CHECK_RESULT();
3959         HWRM_UNLOCK();
3960
3961         return rc;
3962 }
3963
3964 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
3965 {
3966         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3967         struct hwrm_func_cfg_input req = {0};
3968         int rc;
3969
3970         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3971
3972         req.fid = rte_cpu_to_le_16(0xffff);
3973         req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
3974         req.enables = rte_cpu_to_le_32(
3975                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3976         req.async_event_cr = rte_cpu_to_le_16(
3977                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3978         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3979
3980         HWRM_CHECK_RESULT();
3981         HWRM_UNLOCK();
3982
3983         return rc;
3984 }
3985
3986 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
3987 {
3988         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3989         struct hwrm_func_vf_cfg_input req = {0};
3990         int rc;
3991
3992         HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
3993
3994         req.enables = rte_cpu_to_le_32(
3995                         HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3996         req.async_event_cr = rte_cpu_to_le_16(
3997                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3998         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3999
4000         HWRM_CHECK_RESULT();
4001         HWRM_UNLOCK();
4002
4003         return rc;
4004 }
4005
4006 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
4007 {
4008         struct hwrm_func_cfg_input req = {0};
4009         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4010         uint16_t dflt_vlan, fid;
4011         uint32_t func_cfg_flags;
4012         int rc = 0;
4013
4014         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4015
4016         if (is_vf) {
4017                 dflt_vlan = bp->pf->vf_info[vf].dflt_vlan;
4018                 fid = bp->pf->vf_info[vf].fid;
4019                 func_cfg_flags = bp->pf->vf_info[vf].func_cfg_flags;
4020         } else {
4021                 fid = rte_cpu_to_le_16(0xffff);
4022                 func_cfg_flags = bp->pf->func_cfg_flags;
4023                 dflt_vlan = bp->vlan;
4024         }
4025
4026         req.flags = rte_cpu_to_le_32(func_cfg_flags);
4027         req.fid = rte_cpu_to_le_16(fid);
4028         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
4029         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
4030
4031         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4032
4033         HWRM_CHECK_RESULT();
4034         HWRM_UNLOCK();
4035
4036         return rc;
4037 }
4038
4039 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
4040                         uint16_t max_bw, uint16_t enables)
4041 {
4042         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4043         struct hwrm_func_cfg_input req = {0};
4044         int rc;
4045
4046         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4047
4048         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4049         req.enables |= rte_cpu_to_le_32(enables);
4050         req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
4051         req.max_bw = rte_cpu_to_le_32(max_bw);
4052         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4053
4054         HWRM_CHECK_RESULT();
4055         HWRM_UNLOCK();
4056
4057         return rc;
4058 }
4059
4060 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
4061 {
4062         struct hwrm_func_cfg_input req = {0};
4063         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4064         int rc = 0;
4065
4066         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4067
4068         req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
4069         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4070         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
4071         req.dflt_vlan = rte_cpu_to_le_16(bp->pf->vf_info[vf].dflt_vlan);
4072
4073         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4074
4075         HWRM_CHECK_RESULT();
4076         HWRM_UNLOCK();
4077
4078         return rc;
4079 }
4080
4081 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
4082 {
4083         int rc;
4084
4085         if (BNXT_PF(bp))
4086                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
4087         else
4088                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
4089
4090         return rc;
4091 }
4092
4093 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
4094                               void *encaped, size_t ec_size)
4095 {
4096         int rc = 0;
4097         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
4098         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
4099
4100         if (ec_size > sizeof(req.encap_request))
4101                 return -1;
4102
4103         HWRM_PREP(&req, HWRM_REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
4104
4105         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
4106         memcpy(req.encap_request, encaped, ec_size);
4107
4108         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4109
4110         HWRM_CHECK_RESULT();
4111         HWRM_UNLOCK();
4112
4113         return rc;
4114 }
4115
4116 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
4117                                        struct rte_ether_addr *mac)
4118 {
4119         struct hwrm_func_qcfg_input req = {0};
4120         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4121         int rc;
4122
4123         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
4124
4125         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4126         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4127
4128         HWRM_CHECK_RESULT();
4129
4130         memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);
4131
4132         HWRM_UNLOCK();
4133
4134         return rc;
4135 }
4136
4137 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
4138                             void *encaped, size_t ec_size)
4139 {
4140         int rc = 0;
4141         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
4142         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
4143
4144         if (ec_size > sizeof(req.encap_request))
4145                 return -1;
4146
4147         HWRM_PREP(&req, HWRM_EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
4148
4149         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
4150         memcpy(req.encap_request, encaped, ec_size);
4151
4152         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4153
4154         HWRM_CHECK_RESULT();
4155         HWRM_UNLOCK();
4156
4157         return rc;
4158 }
4159
4160 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
4161                          struct rte_eth_stats *stats, uint8_t rx)
4162 {
4163         int rc = 0;
4164         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
4165         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
4166
4167         HWRM_PREP(&req, HWRM_STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
4168
4169         req.stat_ctx_id = rte_cpu_to_le_32(cid);
4170
4171         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4172
4173         HWRM_CHECK_RESULT();
4174
4175         if (rx) {
4176                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
4177                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
4178                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
4179                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
4180                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
4181                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
4182                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_discard_pkts);
4183                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_error_pkts);
4184         } else {
4185                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
4186                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
4187                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
4188                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
4189                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
4190                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
4191         }
4192
4193         HWRM_UNLOCK();
4194
4195         return rc;
4196 }
4197
4198 int bnxt_hwrm_port_qstats(struct bnxt *bp)
4199 {
4200         struct hwrm_port_qstats_input req = {0};
4201         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
4202         struct bnxt_pf_info *pf = bp->pf;
4203         int rc;
4204
4205         HWRM_PREP(&req, HWRM_PORT_QSTATS, BNXT_USE_CHIMP_MB);
4206
4207         req.port_id = rte_cpu_to_le_16(pf->port_id);
4208         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
4209         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
4210         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4211
4212         HWRM_CHECK_RESULT();
4213         HWRM_UNLOCK();
4214
4215         return rc;
4216 }
4217
4218 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
4219 {
4220         struct hwrm_port_clr_stats_input req = {0};
4221         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
4222         struct bnxt_pf_info *pf = bp->pf;
4223         int rc;
4224
4225         /* Not allowed on NS2 device, NPAR, MultiHost, VF */
4226         if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
4227             BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
4228                 return 0;
4229
4230         HWRM_PREP(&req, HWRM_PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
4231
4232         req.port_id = rte_cpu_to_le_16(pf->port_id);
4233         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4234
4235         HWRM_CHECK_RESULT();
4236         HWRM_UNLOCK();
4237
4238         return rc;
4239 }
4240
4241 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
4242 {
4243         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4244         struct hwrm_port_led_qcaps_input req = {0};
4245         int rc;
4246
4247         if (BNXT_VF(bp))
4248                 return 0;
4249
4250         HWRM_PREP(&req, HWRM_PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
4251         req.port_id = bp->pf->port_id;
4252         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4253
4254         HWRM_CHECK_RESULT();
4255
4256         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
4257                 unsigned int i;
4258
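                     /* The LED fields in the response, starting at led0_id,
                      * are laid out to match struct bnxt_led_info, which is
                      * what the single block memcpy below relies on.
                      */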
4259                 bp->leds->num_leds = resp->num_leds;
4260                 memcpy(bp->leds, &resp->led0_id,
4261                         sizeof(bp->leds[0]) * bp->leds->num_leds);
4262                 for (i = 0; i < bp->leds->num_leds; i++) {
4263                         struct bnxt_led_info *led = &bp->leds[i];
4264
4265                         uint16_t caps = led->led_state_caps;
4266
4267                         if (!led->led_group_id ||
4268                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
4269                                 bp->leds->num_leds = 0;
4270                                 break;
4271                         }
4272                 }
4273         }
4274
4275         HWRM_UNLOCK();
4276
4277         return rc;
4278 }
4279
4280 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
4281 {
4282         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4283         struct hwrm_port_led_cfg_input req = {0};
4284         struct bnxt_led_cfg *led_cfg;
4285         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
4286         uint16_t duration = 0;
4287         int rc, i;
4288
4289         if (!bp->leds->num_leds || BNXT_VF(bp))
4290                 return -EOPNOTSUPP;
4291
4292         HWRM_PREP(&req, HWRM_PORT_LED_CFG, BNXT_USE_CHIMP_MB);
4293
4294         if (led_on) {
4295                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
4296                 duration = rte_cpu_to_le_16(500);
4297         }
4298         req.port_id = bp->pf->port_id;
4299         req.num_leds = bp->leds->num_leds;
4300         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
4301         for (i = 0; i < bp->leds->num_leds; i++, led_cfg++) {
4302                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
4303                 led_cfg->led_id = bp->leds[i].led_id;
4304                 led_cfg->led_state = led_state;
4305                 led_cfg->led_blink_on = duration;
4306                 led_cfg->led_blink_off = duration;
4307                 led_cfg->led_group_id = bp->leds[i].led_group_id;
4308         }
4309
4310         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4311
4312         HWRM_CHECK_RESULT();
4313         HWRM_UNLOCK();
4314
4315         return rc;
4316 }
4317
4318 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
4319                                uint32_t *length)
4320 {
4321         int rc;
4322         struct hwrm_nvm_get_dir_info_input req = {0};
4323         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
4324
4325         HWRM_PREP(&req, HWRM_NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
4326
4327         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4328
4329         HWRM_CHECK_RESULT();
4330
4331         *entries = rte_le_to_cpu_32(resp->entries);
4332         *length = rte_le_to_cpu_32(resp->entry_length);
4333
4334         HWRM_UNLOCK();
4335         return rc;
4336 }
4337
4338 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
4339 {
4340         int rc;
4341         uint32_t dir_entries;
4342         uint32_t entry_length;
4343         uint8_t *buf;
4344         size_t buflen;
4345         rte_iova_t dma_handle;
4346         struct hwrm_nvm_get_dir_entries_input req = {0};
4347         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
4348
4349         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
4350         if (rc != 0)
4351                 return rc;
4352
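             /* The first two bytes of the caller's buffer carry the directory
              * entry count and the per-entry length (each truncated to 8 bits);
              * the remainder is pre-filled with 0xff before the entries are
              * DMA'd in below.
              */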
4353         *data++ = dir_entries;
4354         *data++ = entry_length;
4355         len -= 2;
4356         memset(data, 0xff, len);
4357
4358         buflen = dir_entries * entry_length;
4359         buf = rte_malloc("nvm_dir", buflen, 0);
4360         if (buf == NULL)
4361                 return -ENOMEM;
4362         dma_handle = rte_malloc_virt2iova(buf);
4363         if (dma_handle == RTE_BAD_IOVA) {
4364                 rte_free(buf);
4365                 PMD_DRV_LOG(ERR,
4366                         "unable to map response address to physical memory\n");
4367                 return -ENOMEM;
4368         }
4369         HWRM_PREP(&req, HWRM_NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
4370         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4371         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4372
4373         if (rc == 0)
4374                 memcpy(data, buf, len > buflen ? buflen : len);
4375
4376         rte_free(buf);
4377         HWRM_CHECK_RESULT();
4378         HWRM_UNLOCK();
4379
4380         return rc;
4381 }
4382
4383 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
4384                              uint32_t offset, uint32_t length,
4385                              uint8_t *data)
4386 {
4387         int rc;
4388         uint8_t *buf;
4389         rte_iova_t dma_handle;
4390         struct hwrm_nvm_read_input req = {0};
4391         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
4392
4393         buf = rte_malloc("nvm_item", length, 0);
4394         if (!buf)
4395                 return -ENOMEM;
4396
4397         dma_handle = rte_malloc_virt2iova(buf);
4398         if (dma_handle == RTE_BAD_IOVA) {
4399                 rte_free(buf);
4400                 PMD_DRV_LOG(ERR,
4401                         "unable to map response address to physical memory\n");
4402                 return -ENOMEM;
4403         }
4404         HWRM_PREP(&req, HWRM_NVM_READ, BNXT_USE_CHIMP_MB);
4405         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4406         req.dir_idx = rte_cpu_to_le_16(index);
4407         req.offset = rte_cpu_to_le_32(offset);
4408         req.len = rte_cpu_to_le_32(length);
4409         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4410         if (rc == 0)
4411                 memcpy(data, buf, length);
4412
4413         rte_free(buf);
4414         HWRM_CHECK_RESULT();
4415         HWRM_UNLOCK();
4416
4417         return rc;
4418 }
4419
4420 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
4421 {
4422         int rc;
4423         struct hwrm_nvm_erase_dir_entry_input req = {0};
4424         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
4425
4426         HWRM_PREP(&req, HWRM_NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
4427         req.dir_idx = rte_cpu_to_le_16(index);
4428         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4429         HWRM_CHECK_RESULT();
4430         HWRM_UNLOCK();
4431
4432         return rc;
4433 }
4434
4436 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
4437                           uint16_t dir_ordinal, uint16_t dir_ext,
4438                           uint16_t dir_attr, const uint8_t *data,
4439                           size_t data_len)
4440 {
4441         int rc;
4442         struct hwrm_nvm_write_input req = {0};
4443         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
4444         rte_iova_t dma_handle;
4445         uint8_t *buf;
4446
4447         buf = rte_malloc("nvm_write", data_len, 0);
4448         if (!buf)
4449                 return -ENOMEM;
4450
4451         dma_handle = rte_malloc_virt2iova(buf);
4452         if (dma_handle == RTE_BAD_IOVA) {
4453                 rte_free(buf);
4454                 PMD_DRV_LOG(ERR,
4455                         "unable to map response address to physical memory\n");
4456                 return -ENOMEM;
4457         }
4458         memcpy(buf, data, data_len);
4459
4460         HWRM_PREP(&req, HWRM_NVM_WRITE, BNXT_USE_CHIMP_MB);
4461
4462         req.dir_type = rte_cpu_to_le_16(dir_type);
4463         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
4464         req.dir_ext = rte_cpu_to_le_16(dir_ext);
4465         req.dir_attr = rte_cpu_to_le_16(dir_attr);
4466         req.dir_data_length = rte_cpu_to_le_32(data_len);
4467         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
4468
4469         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4470
4471         rte_free(buf);
4472         HWRM_CHECK_RESULT();
4473         HWRM_UNLOCK();
4474
4475         return rc;
4476 }
4477
4478 static void
4479 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
4480 {
4481         uint32_t *count = cbdata;
4482
4483         *count = *count + 1;
4484 }
4485
4486 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
4487                                      struct bnxt_vnic_info *vnic __rte_unused)
4488 {
4489         return 0;
4490 }
4491
4492 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
4493 {
4494         uint32_t count = 0;
4495
4496         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
4497             &count, bnxt_vnic_count_hwrm_stub);
4498
4499         return count;
4500 }
4501
4502 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
4503                                         uint16_t *vnic_ids)
4504 {
4505         struct hwrm_func_vf_vnic_ids_query_input req = {0};
4506         struct hwrm_func_vf_vnic_ids_query_output *resp =
4507                                                 bp->hwrm_cmd_resp_addr;
4508         int rc;
4509
4510         /* First query all VNIC ids */
4511         HWRM_PREP(&req, HWRM_FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
4512
4513         req.vf_id = rte_cpu_to_le_16(bp->pf->first_vf_id + vf);
4514         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf->total_vnics);
4515         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));
4516
4517         if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
4518                 HWRM_UNLOCK();
4519                 PMD_DRV_LOG(ERR,
4520                 "unable to map VNIC ID table address to physical memory\n");
4521                 return -ENOMEM;
4522         }
4523         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4524         HWRM_CHECK_RESULT();
4525         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
4526
4527         HWRM_UNLOCK();
4528
4529         return rc;
4530 }
4531
4532 /*
4533  * This function queries the VNIC IDs for a specified VF. For each VNIC,
4534  * it calls vnic_cb to update the relevant vnic_info field using cbdata,
4535  * then calls hwrm_cb to program the new VNIC configuration to the firmware.
4536  */
4537 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
4538         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
4539         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
4540 {
4541         struct bnxt_vnic_info vnic;
4542         int rc = 0;
4543         int i, num_vnic_ids;
4544         uint16_t *vnic_ids;
4545         size_t vnic_id_sz;
4546         size_t sz;
4547
4548         /* First query all VNIC ids */
4549         vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
4550         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4551                         RTE_CACHE_LINE_SIZE);
4552         if (vnic_ids == NULL)
4553                 return -ENOMEM;
4554
4555         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4556                 rte_mem_lock_page(((char *)vnic_ids) + sz);
4557
4558         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4559
4560         if (num_vnic_ids < 0) {
                             rte_free(vnic_ids);
4561                 return num_vnic_ids;
             }
4562
4563         /* Retrieve each VNIC, let vnic_cb update it (e.g. bd_stall), then reprogram it via hwrm_cb */
4564
4565         for (i = 0; i < num_vnic_ids; i++) {
4566                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4567                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4568                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf->first_vf_id + vf);
4569                 if (rc)
4570                         break;
4571                 if (vnic.mru <= 4)      /* Indicates unallocated */
4572                         continue;
4573
4574                 vnic_cb(&vnic, cbdata);
4575
4576                 rc = hwrm_cb(bp, &vnic);
4577                 if (rc)
4578                         break;
4579         }
4580
4581         rte_free(vnic_ids);
4582
4583         return rc;
4584 }
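
     /*
      * Illustrative sketch (not part of the driver): a callback pair for
      * bnxt_hwrm_func_vf_vnic_query_and_config() that would toggle buffer
      * descriptor (bd) stall on every VNIC owned by a VF. The bd_stall field
      * and the reuse of bnxt_hwrm_vnic_cfg() as the reprogramming step are
      * assumptions made for illustration, not a supported interface.
      *
      *	static void bnxt_bd_stall_cb(struct bnxt_vnic_info *vnic, void *cbdata)
      *	{
      *		vnic->bd_stall = *(bool *)cbdata;
      *	}
      *
      *	static int bnxt_bd_stall_hwrm_cb(struct bnxt *bp,
      *					 struct bnxt_vnic_info *vnic)
      *	{
      *		return bnxt_hwrm_vnic_cfg(bp, vnic);
      *	}
      *
      *	bool on = true;
      *	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_bd_stall_cb,
      *						&on, bnxt_bd_stall_hwrm_cb);
      */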
4585
4586 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
4587                                               bool on)
4588 {
4589         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4590         struct hwrm_func_cfg_input req = {0};
4591         int rc;
4592
4593         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4594
4595         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4596         req.enables |= rte_cpu_to_le_32(
4597                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
4598         req.vlan_antispoof_mode = on ?
4599                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
4600                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
4601         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4602
4603         HWRM_CHECK_RESULT();
4604         HWRM_UNLOCK();
4605
4606         return rc;
4607 }
4608
4609 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
4610 {
4611         struct bnxt_vnic_info vnic;
4612         uint16_t *vnic_ids;
4613         size_t vnic_id_sz;
4614         int num_vnic_ids, i;
4615         size_t sz;
4616         int rc;
4617
4618         vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
4619         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4620                         RTE_CACHE_LINE_SIZE);
4621         if (vnic_ids == NULL)
4622                 return -ENOMEM;
4623
4624         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4625                 rte_mem_lock_page(((char *)vnic_ids) + sz);
4626
4627         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4628         if (rc <= 0)
4629                 goto exit;
4630         num_vnic_ids = rc;
4631
4632         /*
4633          * Loop through to find the default VNIC ID.
4634          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
4635          * by sending the hwrm_func_qcfg command to the firmware.
4636          */
4637         for (i = 0; i < num_vnic_ids; i++) {
4638                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4639                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4640                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
4641                                         bp->pf->first_vf_id + vf);
4642                 if (rc)
4643                         goto exit;
4644                 if (vnic.func_default) {
4645                         rte_free(vnic_ids);
4646                         return vnic.fw_vnic_id;
4647                 }
4648         }
4649         /* Could not find a default VNIC. */
4650         PMD_DRV_LOG(ERR, "No default VNIC\n");
4651 exit:
4652         rte_free(vnic_ids);
4653         return rc;
4654 }
4655
4656 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
4657                          uint16_t dst_id,
4658                          struct bnxt_filter_info *filter)
4659 {
4660         int rc = 0;
4661         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
4662         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4663         uint32_t enables = 0;
4664
4665         if (filter->fw_em_filter_id != UINT64_MAX)
4666                 bnxt_hwrm_clear_em_filter(bp, filter);
4667
4668         HWRM_PREP(&req, HWRM_CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
4669
4670         req.flags = rte_cpu_to_le_32(filter->flags);
4671
4672         enables = filter->enables |
4673               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
4674         req.dst_id = rte_cpu_to_le_16(dst_id);
4675
4676         if (filter->ip_addr_type) {
4677                 req.ip_addr_type = filter->ip_addr_type;
4678                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4679         }
4680         if (enables &
4681             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4682                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4683         if (enables &
4684             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4685                 memcpy(req.src_macaddr, filter->src_macaddr,
4686                        RTE_ETHER_ADDR_LEN);
4687         if (enables &
4688             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
4689                 memcpy(req.dst_macaddr, filter->dst_macaddr,
4690                        RTE_ETHER_ADDR_LEN);
4691         if (enables &
4692             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
4693                 req.ovlan_vid = filter->l2_ovlan;
4694         if (enables &
4695             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
4696                 req.ivlan_vid = filter->l2_ivlan;
4697         if (enables &
4698             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
4699                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4700         if (enables &
4701             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4702                 req.ip_protocol = filter->ip_protocol;
4703         if (enables &
4704             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4705                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
4706         if (enables &
4707             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
4708                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
4709         if (enables &
4710             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
4711                 req.src_port = rte_cpu_to_be_16(filter->src_port);
4712         if (enables &
4713             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
4714                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
4715         if (enables &
4716             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4717                 req.mirror_vnic_id = filter->mirror_vnic_id;
4718
4719         req.enables = rte_cpu_to_le_32(enables);
4720
4721         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4722
4723         HWRM_CHECK_RESULT();
4724
4725         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
4726         HWRM_UNLOCK();
4727
4728         return rc;
4729 }
4730
4731 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
4732 {
4733         int rc = 0;
4734         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
4735         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
4736
4737         if (filter->fw_em_filter_id == UINT64_MAX)
4738                 return 0;
4739
4740         HWRM_PREP(&req, HWRM_CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
4741
4742         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
4743
4744         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4745
4746         HWRM_CHECK_RESULT();
4747         HWRM_UNLOCK();
4748
4749         filter->fw_em_filter_id = UINT64_MAX;
4750         filter->fw_l2_filter_id = UINT64_MAX;
4751
4752         return 0;
4753 }
4754
4755 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
4756                          uint16_t dst_id,
4757                          struct bnxt_filter_info *filter)
4758 {
4759         int rc = 0;
4760         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
4761         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
4762                                                 bp->hwrm_cmd_resp_addr;
4763         uint32_t enables = 0;
4764
4765         if (filter->fw_ntuple_filter_id != UINT64_MAX)
4766                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
4767
4768         HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
4769
4770         req.flags = rte_cpu_to_le_32(filter->flags);
4771
4772         enables = filter->enables |
4773               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
4774         req.dst_id = rte_cpu_to_le_16(dst_id);
4775
4776         if (filter->ip_addr_type) {
4777                 req.ip_addr_type = filter->ip_addr_type;
4778                 enables |=
4779                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4780         }
4781         if (enables &
4782             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4783                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4784         if (enables &
4785             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4786                 memcpy(req.src_macaddr, filter->src_macaddr,
4787                        RTE_ETHER_ADDR_LEN);
4788         if (enables &
4789             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
4790                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4791         if (enables &
4792             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4793                 req.ip_protocol = filter->ip_protocol;
4794         if (enables &
4795             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4796                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
4797         if (enables &
4798             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
4799                 req.src_ipaddr_mask[0] =
4800                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
4801         if (enables &
4802             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
4803                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
4804         if (enables &
4805             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
4806                 req.dst_ipaddr_mask[0] =
4807                         rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]);
4808         if (enables &
4809             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
4810                 req.src_port = rte_cpu_to_le_16(filter->src_port);
4811         if (enables &
4812             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
4813                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
4814         if (enables &
4815             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
4816                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
4817         if (enables &
4818             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
4819                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
4820         if (enables &
4821             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4822                 req.mirror_vnic_id = filter->mirror_vnic_id;
4823
4824         req.enables = rte_cpu_to_le_32(enables);
4825
4826         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4827
4828         HWRM_CHECK_RESULT();
4829
4830         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
4831         filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
4832         HWRM_UNLOCK();
4833
4834         return rc;
4835 }
4836
4837 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
4838                                 struct bnxt_filter_info *filter)
4839 {
4840         int rc = 0;
4841         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
4842         struct hwrm_cfa_ntuple_filter_free_output *resp =
4843                                                 bp->hwrm_cmd_resp_addr;
4844
4845         if (filter->fw_ntuple_filter_id == UINT64_MAX)
4846                 return 0;
4847
4848         HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
4849
4850         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
4851
4852         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4853
4854         HWRM_CHECK_RESULT();
4855         HWRM_UNLOCK();
4856
4857         filter->fw_ntuple_filter_id = UINT64_MAX;
4858
4859         return 0;
4860 }
4861
4862 static int
4863 bnxt_vnic_rss_configure_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4864 {
4865         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4866         uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
4867         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
4868         struct bnxt_rx_queue **rxqs = bp->rx_queues;
4869         uint16_t *ring_tbl = vnic->rss_table;
4870         int nr_ctxs = vnic->num_lb_ctxts;
4871         int max_rings = bp->rx_nr_rings;
4872         int i, j, k, cnt;
4873         int rc = 0;
4874
4875         for (i = 0, k = 0; i < nr_ctxs; i++) {
4876                 struct bnxt_rx_ring_info *rxr;
4877                 struct bnxt_cp_ring_info *cpr;
4878
4879                 HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
4880
4881                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
4882                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
4883                 req.hash_mode_flags = vnic->hash_mode;
4884
4885                 req.ring_grp_tbl_addr =
4886                     rte_cpu_to_le_64(vnic->rss_table_dma_addr +
4887                                      i * BNXT_RSS_ENTRIES_PER_CTX_P5 *
4888                                      2 * sizeof(*ring_tbl));
4889                 req.hash_key_tbl_addr =
4890                     rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
4891
4892                 req.ring_table_pair_index = i;
4893                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
4894
4895                 for (j = 0; j < BNXT_RSS_ENTRIES_PER_CTX_P5; j++) {
4896                         uint16_t ring_id;
4897
4898                         /* Find next active ring. */
4899                         for (cnt = 0; cnt < max_rings; cnt++) {
4900                                 if (rx_queue_state[k] !=
4901                                                 RTE_ETH_QUEUE_STATE_STOPPED)
4902                                         break;
4903                                 if (++k == max_rings)
4904                                         k = 0;
4905                         }
4906
4907                         /* Return if no rings are active. */
4908                         if (cnt == max_rings) {
4909                                 HWRM_UNLOCK();
4910                                 return 0;
4911                         }
4912
4913                         /* Add rx/cp ring pair to RSS table. */
4914                         rxr = rxqs[k]->rx_ring;
4915                         cpr = rxqs[k]->cp_ring;
4916
4917                         ring_id = rxr->rx_ring_struct->fw_ring_id;
4918                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4919                         ring_id = cpr->cp_ring_struct->fw_ring_id;
4920                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4921
4922                         if (++k == max_rings)
4923                                 k = 0;
4924                 }
4925                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
4926                                             BNXT_USE_CHIMP_MB);
4927
4928                 HWRM_CHECK_RESULT();
4929                 HWRM_UNLOCK();
4930         }
4931
4932         return rc;
4933 }
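
     /*
      * Illustrative layout (values assumed): each P5 RSS context holds
      * BNXT_RSS_ENTRIES_PER_CTX_P5 (64) ring-table pairs, written as
      * alternating Rx ring and completion ring IDs. With three active
      * rings the table built above would look like:
      *
      *	ring_tbl: [rx0, cp0, rx1, cp1, rx2, cp2, rx0, cp0, ...]
      *
      * Stopped queues are skipped, so traffic is hashed only to rings
      * whose state is not RTE_ETH_QUEUE_STATE_STOPPED.
      */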
4934
4935 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4936 {
4937         unsigned int rss_idx, fw_idx, i;
4938
4939         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4940                 return 0;
4941
4942         if (!(vnic->rss_table && vnic->hash_type))
4943                 return 0;
4944
4945         if (BNXT_CHIP_P5(bp))
4946                 return bnxt_vnic_rss_configure_p5(bp, vnic);
4947
4948         /*
4949          * Fill the RSS hash & redirection table with
4950          * ring group ids for all VNICs
4951          */
4952         for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
4953              rss_idx++, fw_idx++) {
4954                 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
4955                         fw_idx %= bp->rx_cp_nr_rings;
4956                         if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
4957                                 break;
4958                         fw_idx++;
4959                 }
4960
4961                 if (i == bp->rx_cp_nr_rings)
4962                         return 0;
4963
4964                 vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
4965         }
4966
4967         return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
4968 }
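
     /*
      * Worked example for the legacy (non-P5) fill above, with assumed
      * values: if HW_HASH_INDEX_SIZE is 128 and two ring groups are valid,
      * the redirection table alternates between them:
      *
      *	rss_table: [grp0, grp1, grp0, grp1, ...]   (128 entries)
      *
      * Slots whose group is INVALID_HW_RING_ID are skipped and the walk
      * wraps via fw_idx %= bp->rx_cp_nr_rings.
      */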
4969
4970 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
4971         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
4972 {
4973         uint16_t flags;
4974
4975         req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
4976
4977         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
4978         req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
4979
4980         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
4981         req->num_cmpl_dma_aggr_during_int =
4982                 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
4983
4984         req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
4985
4986         /* min timer set to 1/2 of interrupt timer */
4987         req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
4988
4989         /* buf timer set to 1/4 of interrupt timer */
4990         req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
4991
4992         req->cmpl_aggr_dma_tmr_during_int =
4993                 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
4994
4995         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
4996                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
4997         req->flags = rte_cpu_to_le_16(flags);
4998 }
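
     /*
      * Illustrative arithmetic only (the tick value is assumed): if the
      * caller programmed int_lat_tmr_max = 150 timer ticks, the ratios
      * noted above give int_lat_tmr_min ~ 75 (1/2) and cmpl_aggr_dma_tmr
      * ~ 37 (1/4), so completion DMA aggregation fires well inside the
      * interrupt latency window.
      */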
4999
5000 static int bnxt_hwrm_set_coal_params_p5(struct bnxt *bp,
5001                 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
5002 {
5003         struct hwrm_ring_aggint_qcaps_input req = {0};
5004         struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5005         uint32_t enables;
5006         uint16_t flags;
5007         int rc;
5008
5009         HWRM_PREP(&req, HWRM_RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
5010         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5011         HWRM_CHECK_RESULT();
5012
5013         agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max;
5014         agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min;
5015
5016         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
5017                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
5018         agg_req->flags = rte_cpu_to_le_16(flags);
5019         enables =
5020          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR |
5021          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR;
5022         agg_req->enables = rte_cpu_to_le_32(enables);
5023
5024         HWRM_UNLOCK();
5025         return rc;
5026 }
5027
5028 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
5029                         struct bnxt_coal *coal, uint16_t ring_id)
5030 {
5031         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
5032         struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
5033                                                 bp->hwrm_cmd_resp_addr;
5034         int rc;
5035
5036         /* Set ring coalesce parameters only for P5 chips and Stratus (100G) NICs */
5037         if (BNXT_CHIP_P5(bp)) {
5038                 if (bnxt_hwrm_set_coal_params_p5(bp, &req))
5039                         return -1;
5040         } else if (bnxt_stratus_device(bp)) {
5041                 bnxt_hwrm_set_coal_params(coal, &req);
5042         } else {
5043                 return 0;
5044         }
5045
5046         HWRM_PREP(&req,
5047                   HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
5048                   BNXT_USE_CHIMP_MB);
5049         req.ring_id = rte_cpu_to_le_16(ring_id);
5050         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5051         HWRM_CHECK_RESULT();
5052         HWRM_UNLOCK();
5053         return 0;
5054 }
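
     /*
      * Usage sketch (caller code assumed, mirroring how coalescing is
      * applied per completion ring when a queue starts):
      *
      *	struct bnxt_coal coal = { .int_lat_tmr_max = 150 };   (values assumed)
      *	uint16_t ring_id = rxq->cp_ring->cp_ring_struct->fw_ring_id;
      *	rc = bnxt_hwrm_set_ring_coal(bp, &coal, ring_id);
      */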
5055
5056 #define BNXT_RTE_MEMZONE_FLAG  (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
5057 int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
5058 {
5059         struct hwrm_func_backing_store_qcaps_input req = {0};
5060         struct hwrm_func_backing_store_qcaps_output *resp =
5061                 bp->hwrm_cmd_resp_addr;
5062         struct bnxt_ctx_pg_info *ctx_pg;
5063         struct bnxt_ctx_mem_info *ctx;
5064         int total_alloc_len;
5065         int rc, i, tqm_rings;
5066
5067         if (!BNXT_CHIP_P5(bp) ||
5068             bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
5069             BNXT_VF(bp) ||
5070             bp->ctx)
5071                 return 0;
5072
5073         HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
5074         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5075         HWRM_CHECK_RESULT_SILENT();
5076
5077         total_alloc_len = sizeof(*ctx);
5078         ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len,
5079                           RTE_CACHE_LINE_SIZE);
5080         if (!ctx) {
5081                 rc = -ENOMEM;
5082                 goto ctx_err;
5083         }
5084
5085         ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
5086         ctx->qp_min_qp1_entries =
5087                 rte_le_to_cpu_16(resp->qp_min_qp1_entries);
5088         ctx->qp_max_l2_entries =
5089                 rte_le_to_cpu_16(resp->qp_max_l2_entries);
5090         ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
5091         ctx->srq_max_l2_entries =
5092                 rte_le_to_cpu_16(resp->srq_max_l2_entries);
5093         ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
5094         ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
5095         ctx->cq_max_l2_entries =
5096                 rte_le_to_cpu_16(resp->cq_max_l2_entries);
5097         ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
5098         ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
5099         ctx->vnic_max_vnic_entries =
5100                 rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
5101         ctx->vnic_max_ring_table_entries =
5102                 rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
5103         ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
5104         ctx->stat_max_entries =
5105                 rte_le_to_cpu_32(resp->stat_max_entries);
5106         ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
5107         ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
5108         ctx->tqm_min_entries_per_ring =
5109                 rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
5110         ctx->tqm_max_entries_per_ring =
5111                 rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
5112         ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
5113         if (!ctx->tqm_entries_multiple)
5114                 ctx->tqm_entries_multiple = 1;
5115         ctx->mrav_max_entries =
5116                 rte_le_to_cpu_32(resp->mrav_max_entries);
5117         ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
5118         ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
5119         ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
5120         ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
5121
5122         if (!ctx->tqm_fp_rings_count)
5123                 ctx->tqm_fp_rings_count = bp->max_q;
5124
5125         tqm_rings = ctx->tqm_fp_rings_count + 1;
5126
5127         ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
5128                             sizeof(*ctx_pg) * tqm_rings,
5129                             RTE_CACHE_LINE_SIZE);
5130         if (!ctx_pg) {
5131                 rc = -ENOMEM;
5132                 goto ctx_err;
5133         }
5134         for (i = 0; i < tqm_rings; i++, ctx_pg++)
5135                 ctx->tqm_mem[i] = ctx_pg;
5136
5137         bp->ctx = ctx;
5138 ctx_err:
5139         HWRM_UNLOCK();
5140         return rc;
5141 }
5142
5143 int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
5144 {
5145         struct hwrm_func_backing_store_cfg_input req = {0};
5146         struct hwrm_func_backing_store_cfg_output *resp =
5147                 bp->hwrm_cmd_resp_addr;
5148         struct bnxt_ctx_mem_info *ctx = bp->ctx;
5149         struct bnxt_ctx_pg_info *ctx_pg;
5150         uint32_t *num_entries;
5151         uint64_t *pg_dir;
5152         uint8_t *pg_attr;
5153         uint32_t ena;
5154         int i, rc;
5155
5156         if (!ctx)
5157                 return 0;
5158
5159         HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
5160         req.enables = rte_cpu_to_le_32(enables);
5161
5162         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
5163                 ctx_pg = &ctx->qp_mem;
5164                 req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5165                 req.qp_num_qp1_entries =
5166                         rte_cpu_to_le_16(ctx->qp_min_qp1_entries);
5167                 req.qp_num_l2_entries =
5168                         rte_cpu_to_le_16(ctx->qp_max_l2_entries);
5169                 req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);
5170                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5171                                       &req.qpc_pg_size_qpc_lvl,
5172                                       &req.qpc_page_dir);
5173         }
5174
5175         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
5176                 ctx_pg = &ctx->srq_mem;
5177                 req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5178                 req.srq_num_l2_entries =
5179                                  rte_cpu_to_le_16(ctx->srq_max_l2_entries);
5180                 req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);
5181                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5182                                       &req.srq_pg_size_srq_lvl,
5183                                       &req.srq_page_dir);
5184         }
5185
5186         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
5187                 ctx_pg = &ctx->cq_mem;
5188                 req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5189                 req.cq_num_l2_entries =
5190                                 rte_cpu_to_le_16(ctx->cq_max_l2_entries);
5191                 req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);
5192                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5193                                       &req.cq_pg_size_cq_lvl,
5194                                       &req.cq_page_dir);
5195         }
5196
5197         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
5198                 ctx_pg = &ctx->vnic_mem;
5199                 req.vnic_num_vnic_entries =
5200                         rte_cpu_to_le_16(ctx->vnic_max_vnic_entries);
5201                 req.vnic_num_ring_table_entries =
5202                         rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);
5203                 req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);
5204                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5205                                       &req.vnic_pg_size_vnic_lvl,
5206                                       &req.vnic_page_dir);
5207         }
5208
5209         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
5210                 ctx_pg = &ctx->stat_mem;
5211         req.stat_num_entries = rte_cpu_to_le_32(ctx->stat_max_entries);
5212                 req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);
5213                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5214                                       &req.stat_pg_size_stat_lvl,
5215                                       &req.stat_page_dir);
5216         }
5217
5218         req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
5219         num_entries = &req.tqm_sp_num_entries;
5220         pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;
5221         pg_dir = &req.tqm_sp_page_dir;
5222         ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
5223         for (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
5224                 if (!(enables & ena))
5225                         continue;
5226
5227                 req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
5228
5229                 ctx_pg = ctx->tqm_mem[i];
5230                 *num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5231                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
5232         }
5233
5234         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5235         HWRM_CHECK_RESULT();
5236         HWRM_UNLOCK();
5237
5238         return rc;
5239 }
5240
5241 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
5242 {
5243         struct hwrm_port_qstats_ext_input req = {0};
5244         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
5245         struct bnxt_pf_info *pf = bp->pf;
5246         int rc;
5247
5248         if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
5249               bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
5250                 return 0;
5251
5252         HWRM_PREP(&req, HWRM_PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
5253
5254         req.port_id = rte_cpu_to_le_16(pf->port_id);
5255         if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
5256                 req.tx_stat_host_addr =
5257                         rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map);
5258                 req.tx_stat_size =
5259                         rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
5260         }
5261         if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
5262                 req.rx_stat_host_addr =
5263                         rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map);
5264                 req.rx_stat_size =
5265                         rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
5266         }
5267         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5268
5269         if (rc) {
5270                 bp->fw_rx_port_stats_ext_size = 0;
5271                 bp->fw_tx_port_stats_ext_size = 0;
5272         } else {
5273                 bp->fw_rx_port_stats_ext_size =
5274                         rte_le_to_cpu_16(resp->rx_stat_size);
5275                 bp->fw_tx_port_stats_ext_size =
5276                         rte_le_to_cpu_16(resp->tx_stat_size);
5277         }
5278
5279         HWRM_CHECK_RESULT();
5280         HWRM_UNLOCK();
5281
5282         return rc;
5283 }
5284
5285 int
5286 bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
5287 {
5288         struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0};
5289         struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp =
5290                 bp->hwrm_cmd_resp_addr;
5291         int rc = 0;
5292
5293         HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB);
5294         req.tunnel_type = type;
5295         req.dest_fid = bp->fw_fid;
5296         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5297         HWRM_CHECK_RESULT();
5298
5299         HWRM_UNLOCK();
5300
5301         return rc;
5302 }
5303
5304 int
5305 bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
5306 {
5307         struct hwrm_cfa_redirect_tunnel_type_free_input req = {0};
5308         struct hwrm_cfa_redirect_tunnel_type_free_output *resp =
5309                 bp->hwrm_cmd_resp_addr;
5310         int rc = 0;
5311
5312         HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB);
5313         req.tunnel_type = type;
5314         req.dest_fid = bp->fw_fid;
5315         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5316         HWRM_CHECK_RESULT();
5317
5318         HWRM_UNLOCK();
5319
5320         return rc;
5321 }
5322
5323 int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
5324 {
5325         struct hwrm_cfa_redirect_query_tunnel_type_input req = {0};
5326         struct hwrm_cfa_redirect_query_tunnel_type_output *resp =
5327                 bp->hwrm_cmd_resp_addr;
5328         int rc = 0;
5329
5330         HWRM_PREP(&req, HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB);
5331         req.src_fid = bp->fw_fid;
5332         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5333         HWRM_CHECK_RESULT();
5334
5335         if (type)
5336                 *type = rte_le_to_cpu_32(resp->tunnel_mask);
5337
5338         HWRM_UNLOCK();
5339
5340         return rc;
5341 }
5342
5343 int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
5344                                    uint16_t *dst_fid)
5345 {
5346         struct hwrm_cfa_redirect_tunnel_type_info_input req = {0};
5347         struct hwrm_cfa_redirect_tunnel_type_info_output *resp =
5348                 bp->hwrm_cmd_resp_addr;
5349         int rc = 0;
5350
5351         HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
5352         req.src_fid = bp->fw_fid;
5353         req.tunnel_type = tun_type;
5354         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5355         HWRM_CHECK_RESULT();
5356
5357         if (dst_fid)
5358                 *dst_fid = rte_le_to_cpu_16(resp->dest_fid);
5359
5360         PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", resp->dest_fid);
5361
5362         HWRM_UNLOCK();
5363
5364         return rc;
5365 }
5366
5367 int bnxt_hwrm_set_mac(struct bnxt *bp)
5368 {
5369         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5370         struct hwrm_func_vf_cfg_input req = {0};
5371         int rc = 0;
5372
5373         if (!BNXT_VF(bp))
5374                 return 0;
5375
5376         HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
5377
5378         req.enables =
5379                 rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
5380         memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
5381
5382         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5383
5384         HWRM_CHECK_RESULT();
5385
5386         HWRM_UNLOCK();
5387
5388         return rc;
5389 }
5390
5391 int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
5392 {
5393         struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
5394         struct hwrm_func_drv_if_change_input req = {0};
5395         uint32_t flags;
5396         int rc;
5397
5398         if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
5399                 return 0;
5400
5401         /* Do not issue FUNC_DRV_IF_CHANGE during reset recovery.
5402          * If FUNC_DRV_IF_CHANGE were issued with the "down" flag before
5403          * FUNC_DRV_UNRGTR, the FW would reset before FUNC_DRV_UNRGTR completes.
5404          */
5405         if (!up && (bp->flags & BNXT_FLAG_FW_RESET))
5406                 return 0;
5407
5408         HWRM_PREP(&req, HWRM_FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);
5409
5410         if (up)
5411                 req.flags =
5412                 rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP);
5413
5414         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5415
5416         HWRM_CHECK_RESULT();
5417         flags = rte_le_to_cpu_32(resp->flags);
5418         HWRM_UNLOCK();
5419
5420         if (!up)
5421                 return 0;
5422
5423         if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
5424                 PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
5425                 bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
5426         }
5427
5428         return 0;
5429 }
5430
5431 int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
5432 {
5433         struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5434         struct bnxt_error_recovery_info *info = bp->recovery_info;
5435         struct hwrm_error_recovery_qcfg_input req = {0};
5436         uint32_t flags = 0;
5437         unsigned int i;
5438         int rc;
5439
5440         /* Older FW does not have error recovery support */
5441         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5442                 return 0;
5443
5444         HWRM_PREP(&req, HWRM_ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
5445
5446         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5447
5448         HWRM_CHECK_RESULT();
5449
5450         flags = rte_le_to_cpu_32(resp->flags);
5451         if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST)
5452                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST;
5453         else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
5454                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU;
5455
5456         if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) &&
5457             !(bp->flags & BNXT_FLAG_KONG_MB_EN)) {
5458                 rc = -EINVAL;
5459                 goto err;
5460         }
5461
5462         /* FW returned values are in units of 100msec */
5463         info->driver_polling_freq =
5464                 rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
5465         info->master_func_wait_period =
5466                 rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
5467         info->normal_func_wait_period =
5468                 rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
5469         info->master_func_wait_period_after_reset =
5470                 rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100;
5471         info->max_bailout_time_after_reset =
5472                 rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
5473         info->status_regs[BNXT_FW_STATUS_REG] =
5474                 rte_le_to_cpu_32(resp->fw_health_status_reg);
5475         info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] =
5476                 rte_le_to_cpu_32(resp->fw_heartbeat_reg);
5477         info->status_regs[BNXT_FW_RECOVERY_CNT_REG] =
5478                 rte_le_to_cpu_32(resp->fw_reset_cnt_reg);
5479         info->status_regs[BNXT_FW_RESET_INPROG_REG] =
5480                 rte_le_to_cpu_32(resp->reset_inprogress_reg);
5481         info->reg_array_cnt =
5482                 rte_le_to_cpu_32(resp->reg_array_cnt);
5483
5484         if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) {
5485                 rc = -EINVAL;
5486                 goto err;
5487         }
5488
5489         for (i = 0; i < info->reg_array_cnt; i++) {
5490                 info->reset_reg[i] =
5491                         rte_le_to_cpu_32(resp->reset_reg[i]);
5492                 info->reset_reg_val[i] =
5493                         rte_le_to_cpu_32(resp->reset_reg_val[i]);
5494                 info->delay_after_reset[i] =
5495                         resp->delay_after_reset[i];
5496         }
5497 err:
5498         HWRM_UNLOCK();
5499
5500         /* Map the FW status registers */
5501         if (!rc)
5502                 rc = bnxt_map_fw_health_status_regs(bp);
5503
5504         if (rc) {
5505                 rte_free(bp->recovery_info);
5506                 bp->recovery_info = NULL;
5507         }
5508         return rc;
5509 }
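/*
 * A minimal sketch of how the values captured above are meant to be
 * consumed (the poller lives outside this file; bnxt_read_fw_status_reg()
 * and the last_heart_beat field are assumed here, and info stands for
 * bp->recovery_info): sample the heartbeat counter every
 * driver_polling_freq msec and treat a stalled count as a FW failure.
 *
 *	uint32_t hb = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG);
 *	if (hb == info->last_heart_beat)
 *		;	// heartbeat stalled: begin error recovery
 *	info->last_heart_beat = hb;
 */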
5510
5511 int bnxt_hwrm_fw_reset(struct bnxt *bp)
5512 {
5513         struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr;
5514         struct hwrm_fw_reset_input req = {0};
5515         int rc;
5516
5517         if (!BNXT_PF(bp))
5518                 return -EOPNOTSUPP;
5519
5520         HWRM_PREP(&req, HWRM_FW_RESET, BNXT_USE_KONG(bp));
5521
5522         req.embedded_proc_type =
5523                 HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
5524         req.selfrst_status =
5525                 HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
5526         req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
5527
5528         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
5529                                     BNXT_USE_KONG(bp));
5530
5531         HWRM_CHECK_RESULT();
5532         HWRM_UNLOCK();
5533
5534         return rc;
5535 }
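/*
 * Usage note (sketch): only a PF may request this graceful, ASAP chip
 * self-reset; a VF caller gets -EOPNOTSUPP, so callers should gate on
 * BNXT_PF(bp) or tolerate that error.
 *
 *	if (BNXT_PF(bp))
 *		rc = bnxt_hwrm_fw_reset(bp);
 */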
5536
5537 int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
5538 {
5539         struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
5540         struct hwrm_port_ts_query_input req = {0};
5541         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
5542         uint32_t flags = 0;
5543         int rc;
5544
5545         if (!ptp)
5546                 return 0;
5547
5548         HWRM_PREP(&req, HWRM_PORT_TS_QUERY, BNXT_USE_CHIMP_MB);
5549
5550         switch (path) {
5551         case BNXT_PTP_FLAGS_PATH_TX:
5552                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX;
5553                 break;
5554         case BNXT_PTP_FLAGS_PATH_RX:
5555                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX;
5556                 break;
5557         case BNXT_PTP_FLAGS_CURRENT_TIME:
5558                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME;
5559                 break;
5560         }
5561
5562         req.flags = rte_cpu_to_le_32(flags);
5563         req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
5564
5565         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5566
5567         HWRM_CHECK_RESULT();
5568
5569         if (timestamp) {
5570                 *timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]);
5571                 *timestamp |=
5572                         (uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32;
5573         }
5574         HWRM_UNLOCK();
5575
5576         return rc;
5577 }
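/*
 * Usage sketch (assumed caller, e.g. a PTP read-time callback): query the
 * free-running PHC counter; the TX/RX paths instead return the last
 * latched PTP packet timestamp.
 *
 *	uint64_t ns = 0;
 *	rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, &ns);
 */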
5578
5579 int bnxt_hwrm_cfa_counter_qcaps(struct bnxt *bp, uint16_t *max_fc)
5580 {
5581         int rc = 0;
5582
5583         struct hwrm_cfa_counter_qcaps_input req = {0};
5584         struct hwrm_cfa_counter_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5585
5586         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5587                 PMD_DRV_LOG(DEBUG,
5588                             "Not a PF or trusted VF. Command not supported\n");
5589                 return 0;
5590         }
5591
5592         HWRM_PREP(&req, HWRM_CFA_COUNTER_QCAPS, BNXT_USE_KONG(bp));
5593         req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5594         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5595
5596         HWRM_CHECK_RESULT();
5597         if (max_fc)
5598                 *max_fc = rte_le_to_cpu_16(resp->max_rx_fc);
5599         HWRM_UNLOCK();
5600
5601         return 0;
5602 }
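/*
 * Sketch of the intended sequence (paraphrased from the flow-stats init
 * path): size the host counter tables from the FW-reported maximum before
 * allocating and registering them.
 *
 *	uint16_t max_fc = 0;
 *	rc = bnxt_hwrm_cfa_counter_qcaps(bp, &max_fc);
 *	if (!rc && max_fc)
 *		;	// allocate rx/tx flow-counter tables of max_fc entries
 */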
5603
5604 int bnxt_hwrm_ctx_rgtr(struct bnxt *bp, rte_iova_t dma_addr, uint16_t *ctx_id)
5605 {
5606         int rc = 0;
5607         struct hwrm_cfa_ctx_mem_rgtr_input req = {.req_type = 0 };
5608         struct hwrm_cfa_ctx_mem_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
5609
5610         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5611                 PMD_DRV_LOG(DEBUG,
5612                             "Not a PF or trusted VF. Command not supported\n");
5613                 return 0;
5614         }
5615
5616         HWRM_PREP(&req, HWRM_CFA_CTX_MEM_RGTR, BNXT_USE_KONG(bp));
5617
5618         req.page_level = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0;
5619         req.page_size = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_2M;
5620         req.page_dir = rte_cpu_to_le_64(dma_addr);
5621
5622         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5623
5624         HWRM_CHECK_RESULT();
5625         if (ctx_id) {
5626                 *ctx_id = rte_le_to_cpu_16(resp->ctx_id);
5627                 PMD_DRV_LOG(DEBUG, "ctx_id = %d\n", *ctx_id);
5628         }
5629         HWRM_UNLOCK();
5630
5631         return 0;
5632 }
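/*
 * Note: the registration above is hard-wired to a single 2MB page
 * (PAGE_LEVEL_LVL_0), so the caller's DMA region must fit that layout.
 * Illustrative pairing with the unregister call below (tbl is an assumed
 * counter-table object holding the IOVA):
 *
 *	uint16_t ctx_id;
 *	rc = bnxt_hwrm_ctx_rgtr(bp, tbl->dma_addr, &ctx_id);
 *	...
 *	rc = bnxt_hwrm_ctx_unrgtr(bp, ctx_id);
 */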
5633
5634 int bnxt_hwrm_ctx_unrgtr(struct bnxt *bp, uint16_t ctx_id)
5635 {
5636         int rc = 0;
5637         struct hwrm_cfa_ctx_mem_unrgtr_input req = {.req_type = 0 };
5638         struct hwrm_cfa_ctx_mem_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
5639
5640         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5641                 PMD_DRV_LOG(DEBUG,
5642                             "Not a PF or trusted VF. Command not supported\n");
5643                 return 0;
5644         }
5645
5646         HWRM_PREP(&req, HWRM_CFA_CTX_MEM_UNRGTR, BNXT_USE_KONG(bp));
5647
5648         req.ctx_id = rte_cpu_to_le_16(ctx_id);
5649
5650         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5651
5652         HWRM_CHECK_RESULT();
5653         HWRM_UNLOCK();
5654
5655         return rc;
5656 }
5657
5658 int bnxt_hwrm_cfa_counter_cfg(struct bnxt *bp, enum bnxt_flow_dir dir,
5659                               uint16_t cntr, uint16_t ctx_id,
5660                               uint32_t num_entries, bool enable)
5661 {
5662         struct hwrm_cfa_counter_cfg_input req = {0};
5663         struct hwrm_cfa_counter_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5664         uint16_t flags = 0;
5665         int rc;
5666
5667         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5668                 PMD_DRV_LOG(DEBUG,
5669                             "Not a PF or trusted VF. Command not supported\n");
5670                 return 0;
5671         }
5672
5673         HWRM_PREP(&req, HWRM_CFA_COUNTER_CFG, BNXT_USE_KONG(bp));
5674
5675         req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5676         req.counter_type = rte_cpu_to_le_16(cntr);
5677         flags = enable ? HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE :
5678                 HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_DISABLE;
5679         flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL;
5680         if (dir == BNXT_DIR_RX)
5681                 flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX;
5682         else if (dir == BNXT_DIR_TX)
5683                 flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_TX;
5684         req.flags = rte_cpu_to_le_16(flags);
5685         req.ctx_id = rte_cpu_to_le_16(ctx_id);
5686         req.num_entries = rte_cpu_to_le_32(num_entries);
5687
5688         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5689         HWRM_CHECK_RESULT();
5690         HWRM_UNLOCK();
5691
5692         return 0;
5693 }
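/*
 * Usage sketch: enable pull-mode RX flow counters on a context registered
 * with bnxt_hwrm_ctx_rgtr().  The counter-type constant is assumed to be
 * defined elsewhere in the driver.
 *
 *	rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
 *				       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
 *				       ctx_id, max_fc, true);
 */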
5694
5695 int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp,
5696                                  enum bnxt_flow_dir dir,
5697                                  uint16_t cntr,
5698                                  uint16_t num_entries)
5699 {
5700         struct hwrm_cfa_counter_qstats_output *resp = bp->hwrm_cmd_resp_addr;
5701         struct hwrm_cfa_counter_qstats_input req = {0};
5702         uint16_t flow_ctx_id = 0;
5703         uint16_t flags = 0;
5704         int rc = 0;
5705
5706         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5707                 PMD_DRV_LOG(DEBUG,
5708                             "Not a PF or trusted VF. Command not supported\n");
5709                 return 0;
5710         }
5711
5712         if (dir == BNXT_DIR_RX) {
5713                 flow_ctx_id = bp->flow_stat->rx_fc_in_tbl.ctx_id;
5714                 flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX;
5715         } else if (dir == BNXT_DIR_TX) {
5716                 flow_ctx_id = bp->flow_stat->tx_fc_in_tbl.ctx_id;
5717                 flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_TX;
5718         }
5719
5720         HWRM_PREP(&req, HWRM_CFA_COUNTER_QSTATS, BNXT_USE_KONG(bp));
5721         req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5722         req.counter_type = rte_cpu_to_le_16(cntr);
5723         req.input_flow_ctx_id = rte_cpu_to_le_16(flow_ctx_id);
5724         req.num_entries = rte_cpu_to_le_16(num_entries);
5725         req.flags = rte_cpu_to_le_16(flags);
5726         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5727
5728         HWRM_CHECK_RESULT();
5729         HWRM_UNLOCK();
5730
5731         return 0;
5732 }
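/*
 * Sketch of the pull-mode readout this request drives: the FW DMAs the
 * latest counters into the output table for the given direction, after
 * which the host parses them from its own memory.  rx_fc_out_tbl is the
 * assumed counterpart of the rx_fc_in_tbl used above.
 *
 *	rc = bnxt_hwrm_cfa_counter_qstats(bp, BNXT_DIR_RX, cntr, n);
 *	if (!rc)
 *		;	// parse bp->flow_stat->rx_fc_out_tbl contents
 */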
5733
5734 int bnxt_hwrm_first_vf_id_query(struct bnxt *bp, uint16_t fid,
5735                                 uint16_t *first_vf_id)
5736 {
5737         int rc = 0;
5738         struct hwrm_func_qcaps_input req = {.req_type = 0 };
5739         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5740
5741         HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
5742
5743         req.fid = rte_cpu_to_le_16(fid);
5744
5745         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5746
5747         HWRM_CHECK_RESULT();
5748
5749         if (first_vf_id)
5750                 *first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
5751
5752         HWRM_UNLOCK();
5753
5754         return rc;
5755 }
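/*
 * Illustrative use (a sketch; parent_fid and vf_idx are assumed):
 * representor setup maps a VF index to its FW fid by offsetting from the
 * parent's first VF id.
 *
 *	uint16_t first_vf_id = 0;
 *	rc = bnxt_hwrm_first_vf_id_query(bp, parent_fid, &first_vf_id);
 *	if (!rc)
 *		vfr_fid = first_vf_id + vf_idx;
 */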
5756
5757 int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, struct bnxt_representor *rep_bp)
5758 {
5759         struct hwrm_cfa_pair_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5760         struct hwrm_cfa_pair_alloc_input req = {0};
5761         int rc;
5762
5763         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5764                 PMD_DRV_LOG(DEBUG,
5765                             "Not a PF or trusted VF. Command not supported\n");
5766                 return 0;
5767         }
5768
5769         HWRM_PREP(&req, HWRM_CFA_PAIR_ALLOC, BNXT_USE_CHIMP_MB);
5770         req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
5771         snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
5772                  bp->eth_dev->data->name, rep_bp->vf_id);
5773
5774         req.pf_b_id = rep_bp->parent_pf_idx;
5775         req.vf_b_id = BNXT_REP_PF(rep_bp) ? rte_cpu_to_le_16(((uint16_t)-1)) :
5776                                                 rte_cpu_to_le_16(rep_bp->vf_id);
5777         req.vf_a_id = rte_cpu_to_le_16(bp->fw_fid);
5778         req.host_b_id = 1; /* TBD - Confirm if this is OK */
5779
5780         req.enables |= rep_bp->flags & BNXT_REP_Q_R2F_VALID ?
5781                         HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID : 0;
5782         req.enables |= rep_bp->flags & BNXT_REP_Q_F2R_VALID ?
5783                         HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_BA_VALID : 0;
5784         req.enables |= rep_bp->flags & BNXT_REP_FC_R2F_VALID ?
5785                         HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_AB_VALID : 0;
5786         req.enables |= rep_bp->flags & BNXT_REP_FC_F2R_VALID ?
5787                         HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_BA_VALID : 0;
5788
5789         req.q_ab = rep_bp->rep_q_r2f;
5790         req.q_ba = rep_bp->rep_q_f2r;
5791         req.fc_ab = rep_bp->rep_fc_r2f;
5792         req.fc_ba = rep_bp->rep_fc_f2r;
5793
5794         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5795         HWRM_CHECK_RESULT();
5796
5797         HWRM_UNLOCK();
5798         PMD_DRV_LOG(DEBUG, "%s %d allocated\n",
5799                     BNXT_REP_PF(rep_bp) ? "PFR" : "VFR", rep_bp->vf_id);
5800         return rc;
5801 }
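/*
 * Note: the pair name built above ("<ethdev name>vfr<vf_id>") must match
 * the one rebuilt in bnxt_hwrm_cfa_pair_free() below, since the free
 * request identifies the pair by that string.  The alloc request also
 * reuses the FREE-side REP2FN_TRUFLOW constant for pair_mode; the two
 * enums presumably share the same value.
 */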
5802
5803 int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, struct bnxt_representor *rep_bp)
5804 {
5805         struct hwrm_cfa_pair_free_output *resp = bp->hwrm_cmd_resp_addr;
5806         struct hwrm_cfa_pair_free_input req = {0};
5807         int rc;
5808
5809         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5810                 PMD_DRV_LOG(DEBUG,
5811                             "Not a PF or trusted VF. Command not supported\n");
5812                 return 0;
5813         }
5814
5815         HWRM_PREP(&req, HWRM_CFA_PAIR_FREE, BNXT_USE_CHIMP_MB);
5816         snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
5817                  bp->eth_dev->data->name, rep_bp->vf_id);
5818         req.pf_b_id = rep_bp->parent_pf_idx;
5819         req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
5820         req.vf_id = BNXT_REP_PF(rep_bp) ? rte_cpu_to_le_16(((uint16_t)-1)) :
5821                                                 rte_cpu_to_le_16(rep_bp->vf_id);
5822         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5823         HWRM_CHECK_RESULT();
5824         HWRM_UNLOCK();
5825         PMD_DRV_LOG(DEBUG, "%s %d freed\n", BNXT_REP_PF(rep_bp) ? "PFR" : "VFR",
5826                     rep_bp->vf_id);
5827         return rc;
5828 }