net/bnxt: update copyright year
[dpdk.git] / drivers / net / bnxt / bnxt_hwrm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>
#include <rte_io.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define HWRM_SPEC_CODE_1_8_3            0x10803
#define HWRM_VERSION_1_9_1              0x10901
#define HWRM_VERSION_1_9_2              0x10903
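
/*
 * Note (illustrative): these version constants pack an HWRM interface
 * version maj.min.upd as (maj << 16) | (min << 8) | upd, the same layout
 * bnxt_hwrm_ver_get() uses below to compose bp->hwrm_spec_code, e.g.
 * 1.8.3 -> (1 << 16) | (8 << 8) | 3 = 0x10803.
 */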

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
        return sizeof(int) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}
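
/*
 * Illustrative only: page_getenum()/page_roundup() round a size up to the
 * nearest supported page-size exponent, e.g. page_getenum(3000) == 12 and
 * page_roundup(3000) == 4096, while page_roundup(4096) == 4096.
 */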

static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
                                  uint8_t *pg_attr,
                                  uint64_t *pg_dir)
{
        if (rmem->nr_pages == 0)
                return;

        if (rmem->nr_pages > 1) {
                *pg_attr = 1;
                *pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
        } else {
                *pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
        }
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return 0 on success, -ETIMEDOUT if the
 * HWRM command times out, or a negative error code if the command is
 * rejected by the FW.
 */

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                  uint32_t msg_len, bool use_kong_mb)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };
        uint16_t bar_offset = use_kong_mb ?
                GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
        uint16_t mb_trigger_offset = use_kong_mb ?
                GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
        uint32_t timeout;

        /* Do not send HWRM commands to firmware in error state */
        if (bp->flags & BNXT_FLAG_FATAL_ERROR)
                return 0;

        timeout = bp->hwrm_cmd_timeout;

        if (bp->flags & BNXT_FLAG_SHORT_CMD ||
            msg_len > bp->max_req_len) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
        rte_write32(1, bar);
        /*
         * Make sure the channel doorbell ring command completes before
         * reading the response to avoid getting stale or invalid
         * responses.
         */
        rte_io_mb();

        /* Poll for the valid bit */
        for (i = 0; i < timeout; i++) {
                /* Sanity check on the resp->resp_len */
                rte_io_rmb();
                if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(1);
        }

        if (i >= timeout) {
                /* Suppress VER_GET timeout messages during reset recovery */
                if (bp->flags & BNXT_FLAG_FW_RESET &&
                    rte_le_to_cpu_16(req->req_type) == HWRM_VER_GET)
                        return -ETIMEDOUT;

                PMD_DRV_LOG(ERR,
                            "Error(timeout) sending msg 0x%04x, seq_id %d\n",
                            req->req_type, req->seq_id);
                return -ETIMEDOUT;
        }
        return 0;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() converts FW error codes and returns on failure; it
 * releases the spinlock only on the error paths where it returns early.
 * If the regular int return codes are not used by the function,
 * HWRM_CHECK_RESULT() should not be used directly; rather, it should be
 * copied and modified to suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 * An illustrative sketch of the full pattern follows these macros.
 */
#define HWRM_PREP(req, type, kong) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        if (bp->hwrm_cmd_resp_addr == NULL) { \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return -EACCES; \
        } \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        (req)->req_type = rte_cpu_to_le_16(type); \
        (req)->cmpl_ring = rte_cpu_to_le_16(-1); \
        (req)->seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
                rte_cpu_to_le_16(bp->chimp_cmd_seq++); \
        (req)->target_id = rte_cpu_to_le_16(0xffff); \
        (req)->resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT_SILENT() do {\
        if (rc) { \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_CHECK_RESULT() do {\
        if (rc) { \
                PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
                        rc = -ENOSPC; \
                else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
                        rc = -EINVAL; \
                else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
                        rc = -ENOTSUP; \
                else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
                        rc = -EAGAIN; \
                else if (rc > 0) \
                        rc = -EIO; \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        PMD_DRV_LOG(ERR, \
                                "error %d:%d:%08x:%04x\n", \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        PMD_DRV_LOG(ERR, "error %d\n", rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
                        rc = -ENOSPC; \
                else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
                        rc = -EINVAL; \
                else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
                        rc = -ENOTSUP; \
                else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
                        rc = -EAGAIN; \
                else if (rc > 0) \
                        rc = -EIO; \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)
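
/*
 * Illustrative only (not compiled): a minimal sketch, for a hypothetical
 * command FOO with hwrm_foo_input/hwrm_foo_output, of the canonical pattern
 * the macros above expect. The local names rc and resp are required by
 * HWRM_CHECK_RESULT(). Every bnxt_hwrm_*() function below follows this shape.
 */
#if 0
static int bnxt_hwrm_foo(struct bnxt *bp)
{
        int rc = 0;                     /* consumed by HWRM_CHECK_RESULT() */
        struct hwrm_foo_input req = {.req_type = 0 };
        struct hwrm_foo_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(&req, HWRM_FOO, BNXT_USE_CHIMP_MB); /* takes bp->hwrm_lock */
        /* ... fill request fields, converting to little endian ... */

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();    /* on error: unlocks and returns from here */
        /* ... read resp fields while the lock is still held ... */
        HWRM_UNLOCK();          /* the success path must drop the lock itself */

        return rc;
}
#endif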

int bnxt_hwrm_tf_message_direct(struct bnxt *bp,
                                bool use_kong_mb,
                                uint16_t msg_type,
                                void *msg,
                                uint32_t msg_len,
                                void *resp_msg,
                                uint32_t resp_len)
{
        int rc = 0;
        bool mailbox = BNXT_USE_CHIMP_MB;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;

        if (use_kong_mb)
                mailbox = BNXT_USE_KONG(bp);

        HWRM_PREP(req, msg_type, mailbox);

        rc = bnxt_hwrm_send_message(bp, req, msg_len, mailbox);

        HWRM_CHECK_RESULT();

        if (resp_msg)
                memcpy(resp_msg, resp, resp_len);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_tf_message_tunneled(struct bnxt *bp,
                                  bool use_kong_mb,
                                  uint16_t tf_type,
                                  uint16_t tf_subtype,
                                  uint32_t *tf_response_code,
                                  void *msg,
                                  uint32_t msg_len,
                                  void *response,
                                  uint32_t response_len)
{
        int rc = 0;
        struct hwrm_cfa_tflib_input req = { .req_type = 0 };
        struct hwrm_cfa_tflib_output *resp = bp->hwrm_cmd_resp_addr;
        bool mailbox = BNXT_USE_CHIMP_MB;

        if (msg_len > sizeof(req.tf_req))
                return -ENOMEM;

        if (use_kong_mb)
                mailbox = BNXT_USE_KONG(bp);

        HWRM_PREP(&req, HWRM_TF, mailbox);
        /* Build request using the user supplied request payload.
         * TLV request size is checked at build time against HWRM
         * request max size, thus no checking required.
         */
        req.tf_type = tf_type;
        req.tf_subtype = tf_subtype;
        memcpy(req.tf_req, msg, msg_len);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), mailbox);
        HWRM_CHECK_RESULT();

        /* Copy only the response payload to the caller's buffer: the HWRM
         * message is HWRM header + msg header + payload, while TFLIB
         * provides only a payload placeholder.
         */
        if (response != NULL && response_len != 0)
                memcpy(response, resp->tf_resp, response_len);

        /* Extract the internal tflib response code */
        *tf_response_code = resp->tf_resp_code;
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;

        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;

        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        } else if (vnic->flags & BNXT_VNIC_INFO_MCAST) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr =
                        rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the
         * set_rx_mask list was used for anti-spoof instead. In 1.8.0, the TX
         * path configuration was removed from the set_rx_mask call and this
         * command was added.
         *
         * Besides 1.8.0 and newer, the command is also available in FW
         * version 1.7.8.0 exactly and in 1.7.8.11 and higher; silently skip
         * it everywhere else.
         */
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(&req, HWRM_CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}
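
/*
 * Illustrative only (not compiled): the fw_ver checks above compare against
 * a firmware version packed as (maj << 24) | (min << 16) | (bld << 8) | rsvd,
 * the same layout bnxt_hwrm_ver_get() uses to build bp->fw_ver from the
 * hwrm_fw_*_8b response fields. A hypothetical helper would be:
 */
#if 0
#define BNXT_FW_VER(maj, min, bld, rsvd) \
        (((maj) << 24) | ((min) << 16) | ((bld) << 8) | (rsvd))
/* e.g. the 1.8.0 check above reads bp->fw_ver < BNXT_FW_VER(1, 8, 0, 0) */
#endif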

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                             struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct bnxt_filter_info *l2_filter = filter;
        struct bnxt_vnic_info *vnic = NULL;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        if (filter->matching_l2_fltr_ptr)
                l2_filter = filter->matching_l2_fltr_ptr;

        PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
                    filter, l2_filter, l2_filter->l2_ref_cnt);

        if (l2_filter->l2_ref_cnt == 0)
                return 0;

        if (l2_filter->l2_ref_cnt > 0)
                l2_filter->l2_ref_cnt--;

        if (l2_filter->l2_ref_cnt > 0)
                return 0;

        HWRM_PREP(&req, HWRM_CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = UINT64_MAX;
        if (l2_filter->l2_ref_cnt == 0) {
                vnic = l2_filter->vnic;
                if (vnic) {
                        STAILQ_REMOVE(&vnic->filter, l2_filter,
                                      bnxt_filter_info, next);
                        bnxt_free_filter(bp, l2_filter);
                }
        }

        return 0;
}
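
/*
 * Illustrative only: with an L2 filter shared by two flows (l2_ref_cnt == 2),
 * the first bnxt_hwrm_clear_l2_filter() call merely drops the count to 1 and
 * returns; the second drops it to 0, issues HWRM_CFA_L2_FILTER_FREE, unlinks
 * the filter from its VNIC and frees it.
 */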

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        /* TODO: Is there a better way to add VLANs to each VNIC for VMDQ? */
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                PMD_DRV_LOG(DEBUG,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(&req, HWRM_CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);

        /* PMD does not support XDP and RoCE */
        filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_L2;
        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;
        if (filter->pri_hint) {
                req.pri_hint = filter->pri_hint;
                req.l2_filter_id_hint =
                        rte_cpu_to_le_64(filter->l2_filter_id_hint);
        }

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
        HWRM_UNLOCK();

        filter->l2_ref_cnt++;

        return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(&req, HWRM_PORT_MAC_CFG, BNXT_USE_CHIMP_MB);

        if (ptp->rx_filter)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables = rte_cpu_to_le_32
                (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

        /* Already configured */
        if (ptp)
                return 0;

        HWRM_PREP(&req, HWRM_PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);

        req.port_id = rte_cpu_to_le_16(bp->pf->port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (!BNXT_CHIP_P5(bp) &&
            !(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) {
                /* Drop the HWRM lock taken by HWRM_PREP() before returning */
                HWRM_UNLOCK();
                return 0;
        }

        if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
                bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }

        if (!BNXT_CHIP_P5(bp)) {
                ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
                ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
                ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
                ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
                ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
                ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
                ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
                ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
                ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
        }

        /* All resp fields have been consumed; release the HWRM lock */
        HWRM_UNLOCK();

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        return 0;
}

void bnxt_hwrm_free_vf_info(struct bnxt *bp)
{
        int i;

        for (i = 0; i < bp->pf->max_vfs; i++) {
                rte_free(bp->pf->vf_info[i].vlan_table);
                bp->pf->vf_info[i].vlan_table = NULL;
                rte_free(bp->pf->vf_info[i].vlan_as_table);
                bp->pf->vf_info[i].vlan_as_table = NULL;
        }
        rte_free(bp->pf->vf_info);
        bp->pf->vf_info = NULL;
}

static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        int i;

        HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf->port_id = resp->port_id;
                bp->pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                bp->pf->total_vfs = rte_le_to_cpu_16(resp->max_vfs);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf->max_vfs) {
                        if (bp->pf->vf_info)
                                bnxt_hwrm_free_vf_info(bp);
                        bp->pf->vf_info = rte_zmalloc("bnxt_vf_info",
                            sizeof(bp->pf->vf_info[0]) * new_max_vfs, 0);
                        if (bp->pf->vf_info == NULL) {
                                PMD_DRV_LOG(ERR, "Failed to alloc VF info\n");
                                HWRM_UNLOCK();
                                return -ENOMEM;
                        }
                        bp->pf->max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf->vf_info[i].fid =
                                        bp->pf->first_vf_id + i;
                                bp->pf->vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf->vf_info[i].vlan_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf->vf_info[i].vlan_table);
                                bp->pf->vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf->vf_info[i].vlan_as_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN AS table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                              bp->pf->vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf->vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        if (!bnxt_check_zero_bytes(resp->mac_address, RTE_ETHER_ADDR_LEN)) {
                bp->flags |= BNXT_FLAG_DFLT_MAC_SET;
                memcpy(bp->mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
        } else {
                bp->flags &= ~BNXT_FLAG_DFLT_MAC_SET;
        }
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
        bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
                bp->max_l2_ctx += bp->max_rx_em_flows;
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf->max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        PMD_DRV_LOG(DEBUG, "Max l2_cntxts is %d vnics is %d\n",
                    bp->max_l2_ctx, bp->max_vnics);
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
                bp->pf->total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
                }
        }

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
                bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
                bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
                PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
        }

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
                bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
                bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
                bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;

        HWRM_UNLOCK();

        /* Issue the PTP qcfg only after the QCAPS response has been fully
         * consumed and the HWRM lock dropped; bnxt_hwrm_ptp_qcfg() takes
         * the lock itself.
         */
        if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
                bnxt_hwrm_ptp_qcfg(bp);

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc;

        rc = __bnxt_hwrm_func_qcaps(bp);
        if (rc)
                return rc;

        if (bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
                rc = bnxt_alloc_ctx_mem(bp);
                if (rc)
                        return rc;

                /* On older FW, bnxt_hwrm_func_resc_qcaps can fail and would
                 * otherwise cause an init failure. The error can be ignored,
                 * so return success regardless.
                 */
                rc = bnxt_hwrm_func_resc_qcaps(bp);
                if (!rc)
                        bp->flags |= BNXT_FLAG_NEW_RM;
        }

        return 0;
}

/* VNIC caps cover the capabilities of all VNICs, so there is no need to pass
 * a vnic_id.
 */
int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
{
        int rc = 0;
        uint32_t flags;
        struct hwrm_vnic_qcaps_input req = {.req_type = 0 };
        struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(&req, HWRM_VNIC_QCAPS, BNXT_USE_CHIMP_MB);

        req.target_id = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        flags = rte_le_to_cpu_32(resp->flags);

        if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) {
                bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY;
                PMD_DRV_LOG(INFO, "CoS assignment capability enabled\n");
        }

        if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP)
                bp->vnic_cap_flags |= BNXT_VNIC_CAP_OUTER_RSS;

        if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RX_CMPL_V2_CAP)
                bp->vnic_cap_flags |= BNXT_VNIC_CAP_RX_CMPL_V2;

        bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(&req, HWRM_FUNC_RESET, BNXT_USE_CHIMP_MB);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        uint32_t flags = 0;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
                flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
        if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;

        /* PFs and trusted VFs should indicate support for the Master
         * capability on non-Stingray platforms.
         */
        if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;

        HWRM_PREP(&req, HWRM_FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf->vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf->vf_req_fwd)));
        }

        req.flags = rte_cpu_to_le_32(flags);

        req.async_event_fwd[0] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
        if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
                req.async_event_fwd[0] |=
                        rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
        req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
                                 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
        if (BNXT_PF(bp))
                req.async_event_fwd[1] |=
                        rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION);

        if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))
                req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        flags = rte_le_to_cpu_32(resp->flags);
        if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
                bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;

        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
{
        if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
                return 0;

        return bnxt_hwrm_func_reserve_vf_resc(bp, true);
}

int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
{
        int rc;
        uint32_t flags = 0;
        uint32_t enables;
        struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_vf_cfg_input req = {0};

        HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);

        enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;

        if (BNXT_HAS_RING_GRPS(bp)) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
                req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
        }

        req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
        req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
                                            AGG_RING_MULTIPLIER);
        req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
                                              bp->tx_nr_rings +
                                              BNXT_NUM_ASYNC_CPR(bp));
        req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
        if (bp->vf_resv_strategy ==
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
                           HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
                           HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
                req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
                req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
                req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
        } else if (bp->vf_resv_strategy ==
                   HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MAXIMAL) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
                req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
        }

        if (test)
                flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;

        if (test && BNXT_HAS_RING_GRPS(bp))
                flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;

        req.flags = rte_cpu_to_le_32(flags);
        req.enables |= rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (test)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_resource_qcaps_input req = {0};

        HWRM_PREP(&req, HWRM_FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT_SILENT();

        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        /* func_resource_qcaps does not return max_rx_em_flows.
         * So use the value provided by func_qcaps.
         */
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
                bp->max_l2_ctx += bp->max_rx_em_flows;
        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
        bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
        if (bp->vf_resv_strategy >
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
                bp->vf_resv_strategy =
                HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        bp->hwrm_cmd_timeout = timeout;
        HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (bp->flags & BNXT_FLAG_FW_RESET)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d.%d\n",
                resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
                resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
                resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b,
                resp->hwrm_fw_rsvd_8b);
        bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
                     (resp->hwrm_fw_min_8b << 16) |
                     (resp->hwrm_fw_bld_8b << 8) |
                     resp->hwrm_fw_rsvd_8b;
        PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        fw_version = resp->hwrm_intf_maj_8b << 16;
        fw_version |= resp->hwrm_intf_min_8b << 8;
        fw_version |= resp->hwrm_intf_upd_8b;
        bp->hwrm_spec_code = fw_version;

        /* def_req_timeout value is in milliseconds */
        bp->hwrm_cmd_timeout = rte_le_to_cpu_16(resp->def_req_timeout);
        /* convert timeout to usec */
        bp->hwrm_cmd_timeout *= 1000;
        if (!bp->hwrm_cmd_timeout)
                bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;

        if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                PMD_DRV_LOG(ERR, "Unsupported request length\n");
                rc = -EINVAL;
                goto error;
        }

        bp->chip_num = rte_le_to_cpu_16(resp->chip_num);

        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
        if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
                bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;

        max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT,
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                bp->hwrm_cmd_resp_dma_addr =
                        rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                PMD_DRV_LOG(DEBUG, "Short command supported\n");
                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

        if (((dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
             (dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
            bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
                sprintf(type, "bnxt_hwrm_short_" PCI_PRI_FMT,
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr =
                                rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
                bp->flags |= BNXT_FLAG_KONG_MB_EN;
                PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
                PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
                bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_MGMT;
                PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
        }

        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED) {
                PMD_DRV_LOG(DEBUG, "FW supports advanced flow counters\n");
                bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_COUNTERS;
        }

error:
        HWRM_UNLOCK();
        return rc;
}
1233
1234 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
1235 {
1236         int rc;
1237         struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
1238         struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
1239
1240         if (!(bp->flags & BNXT_FLAG_REGISTERED))
1241                 return 0;
1242
1243         HWRM_PREP(&req, HWRM_FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
1244         req.flags = flags;
1245
1246         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1247
1248         HWRM_CHECK_RESULT();
1249         HWRM_UNLOCK();
1250
1251         return rc;
1252 }
1253
1254 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
1255 {
1256         int rc = 0;
1257         struct hwrm_port_phy_cfg_input req = {0};
1258         struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1259         uint32_t enables = 0;
1260
1261         HWRM_PREP(&req, HWRM_PORT_PHY_CFG, BNXT_USE_CHIMP_MB);
1262
1263         if (conf->link_up) {
1264                 /* Setting Fixed Speed. But AutoNeg is ON, So disable it */
1265                 if (bp->link_info->auto_mode && conf->link_speed) {
1266                         req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
1267                         PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
1268                 }
1269
1270                 req.flags = rte_cpu_to_le_32(conf->phy_flags);
1271                 /*
1272                  * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
1273                  * any auto mode, even "none".
1274                  */
1275                 if (!conf->link_speed) {
1276                         /* No speeds specified. Enable AutoNeg - all speeds */
1277                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
1278                         req.auto_mode =
1279                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
1280                 } else {
1281                         if (bp->link_info->link_signal_mode) {
1282                                 enables |=
1283                                 HWRM_PORT_PHY_CFG_IN_EN_FORCE_PAM4_LINK_SPEED;
1284                                 req.force_pam4_link_speed =
1285                                         rte_cpu_to_le_16(conf->link_speed);
1286                         } else {
1287                                 req.force_link_speed =
1288                                         rte_cpu_to_le_16(conf->link_speed);
1289                         }
1290                 }
1291                 /* AutoNeg - Advertise speeds specified. */
1292                 if (conf->auto_link_speed_mask &&
1293                     !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
1294                         req.auto_mode =
1295                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
1296                         req.auto_link_speed_mask =
1297                                 conf->auto_link_speed_mask;
1298                         if (conf->auto_pam4_link_speeds) {
1299                                 enables |=
1300                                 HWRM_PORT_PHY_CFG_IN_EN_AUTO_PAM4_LINK_SPD_MASK;
1301                                 req.auto_link_pam4_speed_mask =
1302                                         conf->auto_pam4_link_speeds;
1303                         } else {
1304                                 enables |=
1305                                 HWRM_PORT_PHY_CFG_IN_EN_AUTO_LINK_SPEED_MASK;
1306                         }
1307                 }
1308                 if (conf->auto_link_speed &&
1309                 !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE))
1310                         enables |=
1311                                 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
1312
1313                 req.auto_duplex = conf->duplex;
1314                 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
1315                 req.auto_pause = conf->auto_pause;
1316                 req.force_pause = conf->force_pause;
1317                 /* Set force_pause if there is no auto or if there is a force */
1318                 if (req.auto_pause && !req.force_pause)
1319                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
1320                 else
1321                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
1322
1323                 req.enables = rte_cpu_to_le_32(enables);
1324         } else {
1325                 req.flags =
1326                 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
1327                 PMD_DRV_LOG(INFO, "Force Link Down\n");
1328         }
1329
1330         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1331
1332         HWRM_CHECK_RESULT();
1333         HWRM_UNLOCK();
1334
1335         return rc;
1336 }
1337
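/* Query the current PHY configuration and link state from the firmware
 * and cache the results in link_info.
 */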
1338 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
1339                                    struct bnxt_link_info *link_info)
1340 {
1341         int rc = 0;
1342         struct hwrm_port_phy_qcfg_input req = {0};
1343         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1344
1345         HWRM_PREP(&req, HWRM_PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);
1346
1347         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1348
1349         HWRM_CHECK_RESULT();
1350
1351         link_info->phy_link_status = resp->link;
1352         link_info->link_up =
1353                 (link_info->phy_link_status ==
1354                  HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
1355         link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
1356         link_info->duplex = resp->duplex_cfg;
1357         link_info->pause = resp->pause;
1358         link_info->auto_pause = resp->auto_pause;
1359         link_info->force_pause = resp->force_pause;
1360         link_info->auto_mode = resp->auto_mode;
1361         link_info->phy_type = resp->phy_type;
1362         link_info->media_type = resp->media_type;
1363
1364         link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
1365         link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
1366         link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
1367         link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
1368         link_info->phy_ver[0] = resp->phy_maj;
1369         link_info->phy_ver[1] = resp->phy_min;
1370         link_info->phy_ver[2] = resp->phy_bld;
1371         link_info->link_signal_mode =
1372                 rte_le_to_cpu_16(resp->active_fec_signal_mode);
1373         link_info->force_pam4_link_speed =
1374                         rte_le_to_cpu_16(resp->force_pam4_link_speed);
1375         link_info->support_pam4_speeds =
1376                         rte_le_to_cpu_16(resp->support_pam4_speeds);
1377         link_info->auto_pam4_link_speeds =
1378                         rte_le_to_cpu_16(resp->auto_pam4_link_speed_mask);
1379         HWRM_UNLOCK();
1380
1381         PMD_DRV_LOG(DEBUG, "Link Speed:%d,Auto:%d:%x:%x,Support:%x,Force:%x\n",
1382                     link_info->link_speed, link_info->auto_mode,
1383                     link_info->auto_link_speed, link_info->auto_link_speed_mask,
1384                     link_info->support_speeds, link_info->force_link_speed);
1385         PMD_DRV_LOG(DEBUG, "Link Signal:%d,PAM::Auto:%x,Support:%x,Force:%x\n",
1386                     link_info->link_signal_mode,
1387                     link_info->auto_pam4_link_speeds,
1388                     link_info->support_pam4_speeds,
1389                     link_info->force_pam4_link_speed);
1390         return rc;
1391 }
1392
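/* Query PHY capabilities from the firmware and cache the supported
 * autoneg speeds (NRZ and PAM4) in the link info.
 */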
1393 int bnxt_hwrm_port_phy_qcaps(struct bnxt *bp)
1394 {
1395         int rc = 0;
1396         struct hwrm_port_phy_qcaps_input req = {0};
1397         struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1398         struct bnxt_link_info *link_info = bp->link_info;
1399
1400         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
1401                 return 0;
1402
1403         HWRM_PREP(&req, HWRM_PORT_PHY_QCAPS, BNXT_USE_CHIMP_MB);
1404
1405         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1406
1407         HWRM_CHECK_RESULT();
1408
1409         bp->port_cnt = resp->port_cnt;
1410         if (resp->supported_speeds_auto_mode)
1411                 link_info->support_auto_speeds =
1412                         rte_le_to_cpu_16(resp->supported_speeds_auto_mode);
1413         if (resp->supported_pam4_speeds_auto_mode)
1414                 link_info->support_pam4_auto_speeds =
1415                         rte_le_to_cpu_16(resp->supported_pam4_speeds_auto_mode);
1416
1417         HWRM_UNLOCK();
1418
1419         return 0;
1420 }
1421
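/* Scan the Tx CoS queues from the highest index down and select the first
 * queue with a LOSSY service profile as tx_cosq_id[0].
 * Returns true if one was found.
 */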
1422 static bool bnxt_find_lossy_profile(struct bnxt *bp)
1423 {
1424         int i = 0;
1425
1426         for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
1427                 if (bp->tx_cos_queue[i].profile ==
1428                     HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
1429                         bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
1430                         return true;
1431                 }
1432         }
1433         return false;
1434 }
1435
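/* Fallback when no LOSSY profile exists: select the first Tx CoS queue
 * (scanning from the highest index) with a known profile and queue id.
 */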
1436 static void bnxt_find_first_valid_profile(struct bnxt *bp)
1437 {
1438         int i = 0;
1439
1440         for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
1441                 if (bp->tx_cos_queue[i].profile !=
1442                     HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN &&
1443                     bp->tx_cos_queue[i].id !=
1444                     HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN) {
1445                         bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
1446                         break;
1447                 }
1448         }
1449 }
1450
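/* Query the Tx and then the Rx CoS queue configuration from the firmware
 * and derive the Tx CoS queue ids and the maximum traffic class counts.
 */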
1451 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
1452 {
1453         int rc = 0;
1454         struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
1455         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
1456         uint32_t dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
1457         int i;
1458
1459 get_rx_info:
1460         HWRM_PREP(&req, HWRM_QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
1461
1462         req.flags = rte_cpu_to_le_32(dir);
1463         /* Set drv_qmap_cap only on HWRM >= 1.9.1 and when CoS classification is not required */
1464         if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1 &&
1465             !(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
1466                 req.drv_qmap_cap =
1467                         HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
1468         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1469
1470         HWRM_CHECK_RESULT();
1471
1472         if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
1473                 GET_TX_QUEUE_INFO(0);
1474                 GET_TX_QUEUE_INFO(1);
1475                 GET_TX_QUEUE_INFO(2);
1476                 GET_TX_QUEUE_INFO(3);
1477                 GET_TX_QUEUE_INFO(4);
1478                 GET_TX_QUEUE_INFO(5);
1479                 GET_TX_QUEUE_INFO(6);
1480                 GET_TX_QUEUE_INFO(7);
1481         } else {
1482                 GET_RX_QUEUE_INFO(0);
1483                 GET_RX_QUEUE_INFO(1);
1484                 GET_RX_QUEUE_INFO(2);
1485                 GET_RX_QUEUE_INFO(3);
1486                 GET_RX_QUEUE_INFO(4);
1487                 GET_RX_QUEUE_INFO(5);
1488                 GET_RX_QUEUE_INFO(6);
1489                 GET_RX_QUEUE_INFO(7);
1490         }
1491
1492         HWRM_UNLOCK();
1493
1494         if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX)
1495                 goto done;
1496
1497         if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
1498                 bp->tx_cosq_id[0] = bp->tx_cos_queue[0].id;
1499         } else {
1500                 int j;
1501
1502                 /* iterate and find the COSq profile to use for Tx */
1503                 if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
1504                         for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
1505                                 if (bp->tx_cos_queue[i].id != 0xff)
1506                                         bp->tx_cosq_id[j++] =
1507                                                 bp->tx_cos_queue[i].id;
1508                         }
1509                 } else {
1510                         /* When CoS classification is disabled, for normal
1511                          * NIC operations prefer a LOSSY profile. If none is
1512                          * found, fall back to the first valid profile.
1513                          */
1514                         if (!bnxt_find_lossy_profile(bp))
1515                                 bnxt_find_first_valid_profile(bp);
1517                 }
1518         }
1519
1520         bp->max_tc = resp->max_configurable_queues;
1521         bp->max_lltc = resp->max_configurable_lossless_queues;
1522         if (bp->max_tc > BNXT_MAX_QUEUE)
1523                 bp->max_tc = BNXT_MAX_QUEUE;
1524         bp->max_q = bp->max_tc;
1525
1526         if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
1527                 dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX;
1528                 goto get_rx_info;
1529         }
1530
1531 done:
1532         return rc;
1533 }
1534
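/* Allocate a firmware ring of the requested type (Tx, Rx, completion,
 * NQ or Rx aggregation) and return its id in ring->fw_ring_id.
 */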
1535 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
1536                          struct bnxt_ring *ring,
1537                          uint32_t ring_type, uint32_t map_index,
1538                          uint32_t stats_ctx_id, uint32_t cmpl_ring_id,
1539                          uint16_t tx_cosq_id)
1540 {
1541         int rc = 0;
1542         uint32_t enables = 0;
1543         struct hwrm_ring_alloc_input req = {.req_type = 0 };
1544         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1545         struct rte_mempool *mb_pool;
1546         uint16_t rx_buf_size;
1547
1548         HWRM_PREP(&req, HWRM_RING_ALLOC, BNXT_USE_CHIMP_MB);
1549
1550         req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
1551         req.fbo = rte_cpu_to_le_32(0);
1552         /* Association of ring index with doorbell index */
1553         req.logical_id = rte_cpu_to_le_16(map_index);
1554         req.length = rte_cpu_to_le_32(ring->ring_size);
1555
1556         switch (ring_type) {
1557         case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1558                 req.ring_type = ring_type;
1559                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1560                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1561                 req.queue_id = rte_cpu_to_le_16(tx_cosq_id);
1562                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1563                         enables |=
1564                         HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1565                 break;
1566         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1567                 req.ring_type = ring_type;
1568                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1569                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1570                 if (BNXT_CHIP_P5(bp)) {
1571                         mb_pool = bp->rx_queues[0]->mb_pool;
1572                         rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1573                                       RTE_PKTMBUF_HEADROOM;
1574                         rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1575                         req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1576                         enables |=
1577                                 HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
1578                 }
1579                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1580                         enables |=
1581                                 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1582                 break;
1583         case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1584                 req.ring_type = ring_type;
1585                 if (BNXT_HAS_NQ(bp)) {
1586                         /* Association of cp ring with nq */
1587                         req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1588                         enables |=
1589                                 HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
1590                 }
1591                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1592                 break;
1593         case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1594                 req.ring_type = ring_type;
1595                 req.page_size = BNXT_PAGE_SHFT;
1596                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1597                 break;
1598         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1599                 req.ring_type = ring_type;
1600                 req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);
1601
1602                 mb_pool = bp->rx_queues[0]->mb_pool;
1603                 rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1604                               RTE_PKTMBUF_HEADROOM;
1605                 rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1606                 req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1607
1608                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1609                 enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
1610                            HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
1611                            HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1612                 break;
1613         default:
1614                 PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
1615                         ring_type);
1616                 HWRM_UNLOCK();
1617                 return -EINVAL;
1618         }
1619         req.enables = rte_cpu_to_le_32(enables);
1620
1621         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1622
1623         if (rc || resp->error_code) {
1624                 if (rc == 0 && resp->error_code)
1625                         rc = rte_le_to_cpu_16(resp->error_code);
1626                 switch (ring_type) {
1627                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1628                         PMD_DRV_LOG(ERR,
1629                                 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
1630                         HWRM_UNLOCK();
1631                         return rc;
1632                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1633                         PMD_DRV_LOG(ERR,
1634                                     "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1635                         HWRM_UNLOCK();
1636                         return rc;
1637                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1638                         PMD_DRV_LOG(ERR,
1639                                     "hwrm_ring_alloc rx agg failed. rc:%d\n",
1640                                     rc);
1641                         HWRM_UNLOCK();
1642                         return rc;
1643                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1644                         PMD_DRV_LOG(ERR,
1645                                     "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1646                         HWRM_UNLOCK();
1647                         return rc;
1648                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1649                         PMD_DRV_LOG(ERR,
1650                                     "hwrm_ring_alloc nq failed. rc:%d\n", rc);
1651                         HWRM_UNLOCK();
1652                         return rc;
1653                 default:
1654                         PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1655                         HWRM_UNLOCK();
1656                         return rc;
1657                 }
1658         }
1659
1660         ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1661         HWRM_UNLOCK();
1662         return rc;
1663 }
1664
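/* Free a previously allocated firmware ring of the given type. */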
1665 int bnxt_hwrm_ring_free(struct bnxt *bp,
1666                         struct bnxt_ring *ring, uint32_t ring_type)
1667 {
1668         int rc;
1669         struct hwrm_ring_free_input req = {.req_type = 0 };
1670         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1671
1672         HWRM_PREP(&req, HWRM_RING_FREE, BNXT_USE_CHIMP_MB);
1673
1674         req.ring_type = ring_type;
1675         req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1676
1677         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1678
1679         if (rc || resp->error_code) {
1680                 if (rc == 0 && resp->error_code)
1681                         rc = rte_le_to_cpu_16(resp->error_code);
1682                 HWRM_UNLOCK();
1683
1684                 switch (ring_type) {
1685                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1686                         PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1687                                 rc);
1688                         return rc;
1689                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1690                         PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1691                                 rc);
1692                         return rc;
1693                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1694                         PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1695                                 rc);
1696                         return rc;
1697                 case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
1698                         PMD_DRV_LOG(ERR,
1699                                     "hwrm_ring_free nq failed. rc:%d\n", rc);
1700                         return rc;
1701                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
1702                         PMD_DRV_LOG(ERR,
1703                                     "hwrm_ring_free agg failed. rc:%d\n", rc);
1704                         return rc;
1705                 default:
1706                         PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1707                         return rc;
1708                 }
1709         }
1710         HWRM_UNLOCK();
1711         return 0;
1712 }
1713
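/* Allocate a ring group tying together the completion, Rx and Rx
 * aggregation rings and the stats context of ring index idx.
 */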
1714 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1715 {
1716         int rc = 0;
1717         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1718         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1719
1720         HWRM_PREP(&req, HWRM_RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
1721
1722         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1723         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1724         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1725         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1726
1727         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1728
1729         HWRM_CHECK_RESULT();
1730
1731         bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id);
1732
1733         HWRM_UNLOCK();
1734
1735         return rc;
1736 }
1737
1738 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1739 {
1740         int rc;
1741         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1742         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1743
1744         HWRM_PREP(&req, HWRM_RING_GRP_FREE, BNXT_USE_CHIMP_MB);
1745
1746         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1747
1748         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1749
1750         HWRM_CHECK_RESULT();
1751         HWRM_UNLOCK();
1752
1753         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1754         return rc;
1755 }
1756
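/* Clear the hardware counters of the statistics context associated
 * with a completion ring.
 */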
1757 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1758 {
1759         int rc = 0;
1760         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1761         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1762
1763         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1764                 return rc;
1765
1766         HWRM_PREP(&req, HWRM_STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
1767
1768         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1769
1770         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1771
1772         HWRM_CHECK_RESULT();
1773         HWRM_UNLOCK();
1774
1775         return rc;
1776 }
1777
1778 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1779                                 unsigned int idx __rte_unused)
1780 {
1781         int rc;
1782         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1783         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1784
1785         HWRM_PREP(&req, HWRM_STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1786
1787         req.update_period_ms = rte_cpu_to_le_32(0);
1788
1789         req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map);
1790
1791         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1792
1793         HWRM_CHECK_RESULT();
1794
1795         cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);
1796
1797         HWRM_UNLOCK();
1798
1799         return rc;
1800 }
1801
1802 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1803                                 unsigned int idx __rte_unused)
1804 {
1805         int rc;
1806         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1807         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1808
1809         HWRM_PREP(&req, HWRM_STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
1810
1811         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1812
1813         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1814
1815         HWRM_CHECK_RESULT();
1816         HWRM_UNLOCK();
1817
1818         return rc;
1819 }
1820
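/* Allocate a VNIC in the firmware and, on chips with ring groups, map
 * the ring groups in [start_grp_id, end_grp_id) to it.
 */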
1821 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1822 {
1823         int rc = 0, i, j;
1824         struct hwrm_vnic_alloc_input req = { 0 };
1825         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1826
1827         if (!BNXT_HAS_RING_GRPS(bp))
1828                 goto skip_ring_grps;
1829
1830         /* map ring groups to this vnic */
1831         PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1832                 vnic->start_grp_id, vnic->end_grp_id);
1833         for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
1834                 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1835
1836         vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1837         vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1838         vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1839         vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1840
1841 skip_ring_grps:
1842         vnic->mru = BNXT_VNIC_MRU(bp->eth_dev->data->mtu);
1843         HWRM_PREP(&req, HWRM_VNIC_ALLOC, BNXT_USE_CHIMP_MB);
1844
1845         if (vnic->func_default)
1846                 req.flags =
1847                         rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1848         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1849
1850         HWRM_CHECK_RESULT();
1851
1852         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1853         HWRM_UNLOCK();
1854         PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1855         return rc;
1856 }
1857
1858 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1859                                         struct bnxt_vnic_info *vnic,
1860                                         struct bnxt_plcmodes_cfg *pmode)
1861 {
1862         int rc = 0;
1863         struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1864         struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1865
1866         HWRM_PREP(&req, HWRM_VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
1867
1868         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1869
1870         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1871
1872         HWRM_CHECK_RESULT();
1873
1874         pmode->flags = rte_le_to_cpu_32(resp->flags);
1875         /* dflt_vnic bit doesn't exist in the _cfg command */
1876         pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1877         pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1878         pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1879         pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1880
1881         HWRM_UNLOCK();
1882
1883         return rc;
1884 }
1885
1886 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1887                                        struct bnxt_vnic_info *vnic,
1888                                        struct bnxt_plcmodes_cfg *pmode)
1889 {
1890         int rc = 0;
1891         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1892         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1893
1894         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1895                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1896                 return rc;
1897         }
1898
1899         HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1900
1901         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1902         req.flags = rte_cpu_to_le_32(pmode->flags);
1903         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1904         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1905         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1906         req.enables = rte_cpu_to_le_32(
1907             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1908             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1909             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1910         );
1911
1912         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1913
1914         HWRM_CHECK_RESULT();
1915         HWRM_UNLOCK();
1916
1917         return rc;
1918 }
1919
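/* Configure a VNIC: default ring(s), MRU and RSS/CoS/LB context rules.
 * The current placement modes are queried first and restored afterwards.
 */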
1920 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1921 {
1922         int rc = 0;
1923         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1924         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1925         struct bnxt_plcmodes_cfg pmodes = { 0 };
1926         uint32_t ctx_enable_flag = 0;
1927         uint32_t enables = 0;
1928
1929         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1930                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1931                 return rc;
1932         }
1933
1934         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1935         if (rc)
1936                 return rc;
1937
1938         HWRM_PREP(&req, HWRM_VNIC_CFG, BNXT_USE_CHIMP_MB);
1939
1940         if (BNXT_CHIP_P5(bp)) {
1941                 int dflt_rxq = vnic->start_grp_id;
1942                 struct bnxt_rx_ring_info *rxr;
1943                 struct bnxt_cp_ring_info *cpr;
1944                 struct bnxt_rx_queue *rxq;
1945                 int i;
1946
1947                 /*
1948                  * The first active receive ring is used as the VNIC
1949                  * default receive ring. If there are no active receive
1950                  * rings (all corresponding receive queues are stopped),
1951                  * the first receive ring is used.
1952                  */
1953                 for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
1954                         rxq = bp->eth_dev->data->rx_queues[i];
1955                         if (rxq->rx_started) {
1956                                 dflt_rxq = i;
1957                                 break;
1958                         }
1959                 }
1960
1961                 rxq = bp->eth_dev->data->rx_queues[dflt_rxq];
1962                 rxr = rxq->rx_ring;
1963                 cpr = rxq->cp_ring;
1964
1965                 req.default_rx_ring_id =
1966                         rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
1967                 req.default_cmpl_ring_id =
1968                         rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
1969                 enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
1970                           HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
1971                 if (bp->vnic_cap_flags & BNXT_VNIC_CAP_RX_CMPL_V2) {
1972                         enables |= HWRM_VNIC_CFG_INPUT_ENABLES_RX_CSUM_V2_MODE;
1973                         req.rx_csum_v2_mode =
1974                                 HWRM_VNIC_CFG_INPUT_RX_CSUM_V2_MODE_ALL_OK;
1975                 }
1976                 goto config_mru;
1977         }
1978
1979         /* Only RSS is supported for now; CoS and LB are TBD */
1980         enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
1981         if (vnic->lb_rule != 0xffff)
1982                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1983         if (vnic->cos_rule != 0xffff)
1984                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1985         if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
1986                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1987                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1988         }
1989         if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
1990                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID;
1991                 req.queue_id = rte_cpu_to_le_16(vnic->cos_queue_id);
1992         }
1993
1994         enables |= ctx_enable_flag;
1995         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1996         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1997         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1998         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1999
2000 config_mru:
2001         req.enables = rte_cpu_to_le_32(enables);
2002         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2003         req.mru = rte_cpu_to_le_16(vnic->mru);
2004         /* Configure default VNIC only once. */
2005         if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
2006                 req.flags |=
2007                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
2008                 bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
2009         }
2010         if (vnic->vlan_strip)
2011                 req.flags |=
2012                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
2013         if (vnic->bd_stall)
2014                 req.flags |=
2015                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
2016         if (vnic->roce_dual)
2017                 req.flags |= rte_cpu_to_le_32(
2018                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
2019         if (vnic->roce_only)
2020                 req.flags |= rte_cpu_to_le_32(
2021                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
2022         if (vnic->rss_dflt_cr)
2023                 req.flags |= rte_cpu_to_le_32(
2024                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
2025
2026         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2027
2028         HWRM_CHECK_RESULT();
2029         HWRM_UNLOCK();
2030
2031         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
2032
2033         return rc;
2034 }
2035
2036 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
2037                 int16_t fw_vf_id)
2038 {
2039         int rc = 0;
2040         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
2041         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2042
2043         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2044                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %x\n", vnic->fw_vnic_id);
2045                 return rc;
2046         }
2047         HWRM_PREP(&req, HWRM_VNIC_QCFG, BNXT_USE_CHIMP_MB);
2048
2049         req.enables =
2050                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
2051         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2052         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
2053
2054         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2055
2056         HWRM_CHECK_RESULT();
2057
2058         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
2059         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
2060         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
2061         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
2062         vnic->mru = rte_le_to_cpu_16(resp->mru);
2063         vnic->func_default = rte_le_to_cpu_32(
2064                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
2065         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
2066                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
2067         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
2068                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
2069         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
2070                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
2071         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
2072                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
2073         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
2074                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
2075
2076         HWRM_UNLOCK();
2077
2078         return rc;
2079 }
2080
2081 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
2082                              struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
2083 {
2084         int rc = 0;
2085         uint16_t ctx_id;
2086         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
2087         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
2088                                                 bp->hwrm_cmd_resp_addr;
2089
2090         HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
2091
2092         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2093         HWRM_CHECK_RESULT();
2094
2095         ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
2096         if (!BNXT_HAS_RING_GRPS(bp))
2097                 vnic->fw_grp_ids[ctx_idx] = ctx_id;
2098         else if (ctx_idx == 0)
2099                 vnic->rss_rule = ctx_id;
2100
2101         HWRM_UNLOCK();
2102
2103         return rc;
2104 }
2105
2106 static
2107 int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
2108                              struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
2109 {
2110         int rc = 0;
2111         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
2112         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
2113                                                 bp->hwrm_cmd_resp_addr;
2114
2115         if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
2116                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
2117                 return rc;
2118         }
2119         HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
2120
2121         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
2122
2123         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2124
2125         HWRM_CHECK_RESULT();
2126         HWRM_UNLOCK();
2127
2128         return rc;
2129 }
2130
2131 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2132 {
2133         int rc = 0;
2134
2135         if (BNXT_CHIP_P5(bp)) {
2136                 int j;
2137
2138                 for (j = 0; j < vnic->num_lb_ctxts; j++) {
2139                         rc = _bnxt_hwrm_vnic_ctx_free(bp,
2140                                                       vnic,
2141                                                       vnic->fw_grp_ids[j]);
2142                         vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
2143                 }
2144                 vnic->num_lb_ctxts = 0;
2145         } else {
2146                 rc = _bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
2147                 vnic->rss_rule = INVALID_HW_RING_ID;
2148         }
2149
2150         return rc;
2151 }
2152
2153 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2154 {
2155         int rc = 0;
2156         struct hwrm_vnic_free_input req = {.req_type = 0 };
2157         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
2158
2159         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2160                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
2161                 return rc;
2162         }
2163
2164         HWRM_PREP(&req, HWRM_VNIC_FREE, BNXT_USE_CHIMP_MB);
2165
2166         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2167
2168         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2169
2170         HWRM_CHECK_RESULT();
2171         HWRM_UNLOCK();
2172
2173         vnic->fw_vnic_id = INVALID_HW_RING_ID;
2174         /* Clear the flag so the default VNIC can be configured again. */
2175         if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
2176                 bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;
2177
2178         return rc;
2179 }
2180
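/* On P5 chips RSS is programmed per ring table pair: issue one
 * HWRM_VNIC_RSS_CFG per RSS context, each covering one slice of the
 * RSS table.
 */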
2181 static int
2182 bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2183 {
2184         int i;
2185         int rc = 0;
2186         int nr_ctxs = vnic->num_lb_ctxts;
2187         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2188         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2189
2190         for (i = 0; i < nr_ctxs; i++) {
2191                 HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2192
2193                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2194                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
2195                 req.hash_mode_flags = vnic->hash_mode;
2196
2197                 req.hash_key_tbl_addr =
2198                         rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2199
2200                 req.ring_grp_tbl_addr =
2201                         rte_cpu_to_le_64(vnic->rss_table_dma_addr +
2202                                          i * HW_HASH_INDEX_SIZE);
2203                 req.ring_table_pair_index = i;
2204                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
2205
2206                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
2207                                             BNXT_USE_CHIMP_MB);
2208
2209                 HWRM_CHECK_RESULT();
2210                 HWRM_UNLOCK();
2211         }
2212
2213         return rc;
2214 }
2215
2216 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
2217                            struct bnxt_vnic_info *vnic)
2218 {
2219         int rc = 0;
2220         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2221         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2222
2223         if (!vnic->rss_table)
2224                 return 0;
2225
2226         if (BNXT_CHIP_P5(bp))
2227                 return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
2228
2229         HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2230
2231         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
2232         req.hash_mode_flags = vnic->hash_mode;
2233
2234         req.ring_grp_tbl_addr =
2235             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
2236         req.hash_key_tbl_addr =
2237             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2238         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
2239         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2240
2241         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2242
2243         HWRM_CHECK_RESULT();
2244         HWRM_UNLOCK();
2245
2246         return rc;
2247 }
2248
2249 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
2250                         struct bnxt_vnic_info *vnic)
2251 {
2252         int rc = 0;
2253         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
2254         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2255         uint16_t size;
2256
2257         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2258                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
2259                 return rc;
2260         }
2261
2262         HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
2263
2264         req.flags = rte_cpu_to_le_32(
2265                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
2266
2267         req.enables = rte_cpu_to_le_32(
2268                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
2269
2270         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
2271         size -= RTE_PKTMBUF_HEADROOM;
2272         size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
2273
2274         req.jumbo_thresh = rte_cpu_to_le_16(size);
2275         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2276
2277         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2278
2279         HWRM_CHECK_RESULT();
2280         HWRM_UNLOCK();
2281
2282         return rc;
2283 }
2284
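/* Enable or disable TPA (LRO) aggregation on a VNIC. */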
2285 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
2286                         struct bnxt_vnic_info *vnic, bool enable)
2287 {
2288         int rc = 0;
2289         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
2290         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2291
2292         if (BNXT_CHIP_P5(bp) && !bp->max_tpa_v2) {
2293                 if (enable)
2294                         PMD_DRV_LOG(ERR, "No HW support for LRO\n");
2295                 return -ENOTSUP;
2296         }
2297
2298         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2299                 PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
2300                 return 0;
2301         }
2302
2303         HWRM_PREP(&req, HWRM_VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
2304
2305         if (enable) {
2306                 req.enables = rte_cpu_to_le_32(
2307                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
2308                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
2309                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
2310                 req.flags = rte_cpu_to_le_32(
2311                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
2312                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
2313                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
2314                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
2315                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
2316                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
2317                 req.max_aggs = rte_cpu_to_le_16(BNXT_TPA_MAX_AGGS(bp));
2318                 req.max_agg_segs = rte_cpu_to_le_16(BNXT_TPA_MAX_SEGS(bp));
2319                 req.min_agg_len = rte_cpu_to_le_32(512);
2320         }
2321         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2322
2323         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2324
2325         HWRM_CHECK_RESULT();
2326         HWRM_UNLOCK();
2327
2328         return rc;
2329 }
2330
2331 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
2332 {
2333         struct hwrm_func_cfg_input req = {0};
2334         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2335         int rc;
2336
2337         req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
2338         req.enables = rte_cpu_to_le_32(
2339                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2340         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
2341         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
2342
2343         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
2344
2345         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2346         HWRM_CHECK_RESULT();
2347         HWRM_UNLOCK();
2348
2349         bp->pf->vf_info[vf].random_mac = false;
2350
2351         return rc;
2352 }
2353
2354 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
2355                                   uint64_t *dropped)
2356 {
2357         int rc = 0;
2358         struct hwrm_func_qstats_input req = {.req_type = 0};
2359         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2360
2361         HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2362
2363         req.fid = rte_cpu_to_le_16(fid);
2364
2365         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2366
2367         HWRM_CHECK_RESULT();
2368
2369         if (dropped)
2370                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
2371
2372         HWRM_UNLOCK();
2373
2374         return rc;
2375 }
2376
2377 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
2378                           struct rte_eth_stats *stats,
2379                           struct hwrm_func_qstats_output *func_qstats)
2380 {
2381         int rc = 0;
2382         struct hwrm_func_qstats_input req = {.req_type = 0};
2383         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2384
2385         HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2386
2387         req.fid = rte_cpu_to_le_16(fid);
2388
2389         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2390
2391         HWRM_CHECK_RESULT();
2392         if (func_qstats)
2393                 memcpy(func_qstats, resp,
2394                        sizeof(struct hwrm_func_qstats_output));
2395
2396         if (!stats)
2397                 goto exit;
2398
2399         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2400         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2401         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2402         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2403         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2404         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2405
2406         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2407         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2408         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2409         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2410         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2411         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2412
2413         stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
2414         stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
2415         stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
2416
2417 exit:
2418         HWRM_UNLOCK();
2419
2420         return rc;
2421 }
2422
2423 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
2424 {
2425         int rc = 0;
2426         struct hwrm_func_clr_stats_input req = {.req_type = 0};
2427         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2428
2429         HWRM_PREP(&req, HWRM_FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
2430
2431         req.fid = rte_cpu_to_le_16(fid);
2432
2433         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2434
2435         HWRM_CHECK_RESULT();
2436         HWRM_UNLOCK();
2437
2438         return rc;
2439 }
2440
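/* Clear the statistics contexts of all Rx and Tx completion rings. */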
2441 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
2442 {
2443         unsigned int i;
2444         int rc = 0;
2445
2446         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2447                 struct bnxt_tx_queue *txq;
2448                 struct bnxt_rx_queue *rxq;
2449                 struct bnxt_cp_ring_info *cpr;
2450
2451                 if (i >= bp->rx_cp_nr_rings) {
2452                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2453                         cpr = txq->cp_ring;
2454                 } else {
2455                         rxq = bp->rx_queues[i];
2456                         cpr = rxq->cp_ring;
2457                 }
2458
2459                 rc = bnxt_hwrm_stat_clear(bp, cpr);
2460                 if (rc)
2461                         return rc;
2462         }
2463         return 0;
2464 }
2465
2466 static int
2467 bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
2468 {
2469         int rc;
2470         unsigned int i;
2471         struct bnxt_cp_ring_info *cpr;
2472
2473         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2475                 if (i >= bp->rx_cp_nr_rings) {
2476                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
2477                 } else {
2478                         cpr = bp->rx_queues[i]->cp_ring;
2479                         if (BNXT_HAS_RING_GRPS(bp))
2480                                 bp->grp_info[i].fw_stats_ctx = -1;
2481                 }
2482                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
2483                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
2484                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
2485                         if (rc)
2486                                 return rc;
2487                 }
2488         }
2489         return 0;
2490 }
2491
2492 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
2493 {
2494         unsigned int i;
2495         int rc = 0;
2496
2497         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2498                 struct bnxt_tx_queue *txq;
2499                 struct bnxt_rx_queue *rxq;
2500                 struct bnxt_cp_ring_info *cpr;
2501
2502                 if (i >= bp->rx_cp_nr_rings) {
2503                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2504                         cpr = txq->cp_ring;
2505                 } else {
2506                         rxq = bp->rx_queues[i];
2507                         cpr = rxq->cp_ring;
2508                 }
2509
2510                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
2511
2512                 if (rc)
2513                         return rc;
2514         }
2515         return rc;
2516 }
2517
2518 static int
2519 bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
2520 {
2521         uint16_t idx;
2522         int rc = 0;
2523
2524         if (!BNXT_HAS_RING_GRPS(bp))
2525                 return 0;
2526
2527         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
2529                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
2530                         continue;
2531
2532                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
2533
2534                 if (rc)
2535                         return rc;
2536         }
2537         return rc;
2538 }
2539
2540 void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2541 {
2542         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2543
2544         bnxt_hwrm_ring_free(bp, cp_ring,
2545                             HWRM_RING_FREE_INPUT_RING_TYPE_NQ);
2546         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2547         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2548                                      sizeof(*cpr->cp_desc_ring));
2549         cpr->cp_raw_cons = 0;
2550         cpr->valid = 0;
2551 }
2552
2553 void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2554 {
2555         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2556
2557         bnxt_hwrm_ring_free(bp, cp_ring,
2558                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
2559         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2560         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2561                         sizeof(*cpr->cp_desc_ring));
2562         cpr->cp_raw_cons = 0;
2563         cpr->valid = 0;
2564 }
2565
2566 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
2567 {
2568         struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
2569         struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
2570         struct bnxt_ring *ring = rxr->rx_ring_struct;
2571         struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
2572
2573         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2574                 bnxt_hwrm_ring_free(bp, ring,
2575                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2576                 ring->fw_ring_id = INVALID_HW_RING_ID;
2577                 if (BNXT_HAS_RING_GRPS(bp))
2578                         bp->grp_info[queue_index].rx_fw_ring_id =
2579                                                         INVALID_HW_RING_ID;
2580         }
2581         ring = rxr->ag_ring_struct;
2582         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2583                 bnxt_hwrm_ring_free(bp, ring,
2584                                     BNXT_CHIP_P5(bp) ?
2585                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
2586                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2587                 if (BNXT_HAS_RING_GRPS(bp))
2588                         bp->grp_info[queue_index].ag_fw_ring_id =
2589                                                         INVALID_HW_RING_ID;
2590         }
2591         if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
2592                 bnxt_free_cp_ring(bp, cpr);
2593
2594         if (BNXT_HAS_RING_GRPS(bp))
2595                 bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
2596 }
2597
2598 static int
2599 bnxt_free_all_hwrm_rings(struct bnxt *bp)
2600 {
2601         unsigned int i;
2602
2603         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
2604                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
2605                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
2606                 struct bnxt_ring *ring = txr->tx_ring_struct;
2607                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
2608
2609                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2610                         bnxt_hwrm_ring_free(bp, ring,
2611                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
2612                         ring->fw_ring_id = INVALID_HW_RING_ID;
2613                         memset(txr->tx_desc_ring, 0,
2614                                         txr->tx_ring_struct->ring_size *
2615                                         sizeof(*txr->tx_desc_ring));
2616                         memset(txr->tx_buf_ring, 0,
2617                                         txr->tx_ring_struct->ring_size *
2618                                         sizeof(*txr->tx_buf_ring));
2619                         txr->tx_raw_prod = 0;
2620                         txr->tx_raw_cons = 0;
2621                 }
2622                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
2623                         bnxt_free_cp_ring(bp, cpr);
2624                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
2625                 }
2626         }
2627
2628         for (i = 0; i < bp->rx_cp_nr_rings; i++)
2629                 bnxt_free_hwrm_rx_ring(bp, i);
2630
2631         return 0;
2632 }
2633
2634 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
2635 {
2636         uint16_t i;
2637         int rc = 0;
2638
2639         if (!BNXT_HAS_RING_GRPS(bp))
2640                 return 0;
2641
2642         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
2643                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
2644                 if (rc)
2645                         return rc;
2646         }
2647         return rc;
2648 }
2649
2650 /*
2651  * HWRM utility functions
2652  */
2653
2654 void bnxt_free_hwrm_resources(struct bnxt *bp)
2655 {
2656         /* Release the HWRM command request and response buffers */
2657         rte_free(bp->hwrm_cmd_resp_addr);
2658         rte_free(bp->hwrm_short_cmd_req_addr);
2659         bp->hwrm_cmd_resp_addr = NULL;
2660         bp->hwrm_short_cmd_req_addr = NULL;
2661         bp->hwrm_cmd_resp_dma_addr = 0;
2662         bp->hwrm_short_cmd_req_dma_addr = 0;
2663 }
2664
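/* Allocate the DMA-able buffer used for HWRM responses and initialize
 * the lock that serializes HWRM commands.
 */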
2665 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2666 {
2667         struct rte_pci_device *pdev = bp->pdev;
2668         char type[RTE_MEMZONE_NAMESIZE];
2669
2670         sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT, pdev->addr.domain,
2671                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2672         bp->max_resp_len = HWRM_MAX_RESP_LEN;
2673         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
2674         if (bp->hwrm_cmd_resp_addr == NULL)
2675                 return -ENOMEM;
2676         bp->hwrm_cmd_resp_dma_addr =
2677                 rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
2678         if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
2679                 PMD_DRV_LOG(ERR,
2680                         "unable to map response address to physical memory\n");
2681                 return -ENOMEM;
2682         }
2683         rte_spinlock_init(&bp->hwrm_lock);
2684
2685         return 0;
2686 }
2687
2688 int
2689 bnxt_clear_one_vnic_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
2690 {
2691         int rc = 0;
2692
2693         if (filter->filter_type == HWRM_CFA_EM_FILTER) {
2694                 rc = bnxt_hwrm_clear_em_filter(bp, filter);
2695                 if (rc)
2696                         return rc;
2697         } else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
2698                 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2699                 if (rc)
2700                         return rc;
2701         }
2702
2703         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2704         return rc;
2705 }
2706
2707 static int
2708 bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2709 {
2710         struct bnxt_filter_info *filter;
2711         int rc = 0;
2712
2713         while ((filter = STAILQ_FIRST(&vnic->filter)) != NULL) {
2714                 rc = bnxt_clear_one_vnic_filter(bp, filter);
2715                 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
2716                 bnxt_free_filter(bp, filter);
2717         }
2718         return rc;
2719 }
2720
2721 static int
2722 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2723 {
2724         struct bnxt_filter_info *filter;
2725         struct rte_flow *flow;
2726         int rc = 0;
2727
2728         while (!STAILQ_EMPTY(&vnic->flow_list)) {
2729                 flow = STAILQ_FIRST(&vnic->flow_list);
2730                 filter = flow->filter;
2731                 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
2732                 rc = bnxt_clear_one_vnic_filter(bp, filter);
2733
2734                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2735                 rte_free(flow);
2736         }
2737         return rc;
2738 }
2739
2740 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2741 {
2742         struct bnxt_filter_info *filter;
2743         int rc = 0;
2744
2745         STAILQ_FOREACH(filter, &vnic->filter, next) {
2746                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2747                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2748                                                      filter);
2749                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2750                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2751                                                          filter);
2752                 else
2753                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2754                                                      filter);
2755                 if (rc)
2756                         break;
2757         }
2758         return rc;
2759 }
2760
2761 static void
2762 bnxt_free_tunnel_ports(struct bnxt *bp)
2763 {
2764         if (bp->vxlan_port_cnt)
2765                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2766                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2767
2768         if (bp->geneve_port_cnt)
2769                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2770                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2771 }
2772
2773 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2774 {
2775         int i;
2776
2777         if (bp->vnic_info == NULL)
2778                 return;
2779
2780         /*
2781          * Cleanup VNICs in reverse order, to make sure the L2 filter
2782          * from vnic0 is last to be cleaned up.
2783          */
2784         for (i = bp->max_vnics - 1; i >= 0; i--) {
2785                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2786
2787                 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
2788                         continue;
2789
2790                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2791
2792                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2793
2794                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2795
2796                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2797
2798                 bnxt_hwrm_vnic_free(bp, vnic);
2799
2800                 rte_free(vnic->fw_grp_ids);
2801         }
2802         /* Ring resources */
2803         bnxt_free_all_hwrm_rings(bp);
2804         bnxt_free_all_hwrm_ring_grps(bp);
2805         bnxt_free_all_hwrm_stat_ctxs(bp);
2806         bnxt_free_tunnel_ports(bp);
2807 }
2808
2809 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2810 {
2811         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2812
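        /* ETH_LINK_SPEED_AUTONEG is 0, so this tests that FIXED is not set. */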
2813         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2814                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2815
2816         switch (conf_link_speed) {
2817         case ETH_LINK_SPEED_10M_HD:
2818         case ETH_LINK_SPEED_100M_HD:
2819                 /* FALLTHROUGH */
2820                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2821         }
2822         return hw_link_duplex;
2823 }
2824
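/*
 * A link_speeds configuration of 0 (ETH_LINK_SPEED_AUTONEG) requests
 * autonegotiation; any nonzero value selects explicit speeds.
 */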
2825 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2826 {
2827         return !conf_link;
2828 }
2829
2830 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
2831                                           uint16_t pam4_link)
2832 {
2833         uint16_t eth_link_speed = 0;
2834
2835         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2836                 return ETH_LINK_SPEED_AUTONEG;
2837
2838         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2839         case ETH_LINK_SPEED_100M:
2840         case ETH_LINK_SPEED_100M_HD:
2841                 /* FALLTHROUGH */
2842                 eth_link_speed =
2843                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2844                 break;
2845         case ETH_LINK_SPEED_1G:
2846                 eth_link_speed =
2847                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2848                 break;
2849         case ETH_LINK_SPEED_2_5G:
2850                 eth_link_speed =
2851                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2852                 break;
2853         case ETH_LINK_SPEED_10G:
2854                 eth_link_speed =
2855                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2856                 break;
2857         case ETH_LINK_SPEED_20G:
2858                 eth_link_speed =
2859                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2860                 break;
2861         case ETH_LINK_SPEED_25G:
2862                 eth_link_speed =
2863                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2864                 break;
2865         case ETH_LINK_SPEED_40G:
2866                 eth_link_speed =
2867                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2868                 break;
2869         case ETH_LINK_SPEED_50G:
2870                 eth_link_speed = pam4_link ?
2871                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB :
2872                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2873                 break;
2874         case ETH_LINK_SPEED_100G:
2875                 eth_link_speed = pam4_link ?
2876                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB :
2877                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2878                 break;
2879         case ETH_LINK_SPEED_200G:
2880                 eth_link_speed =
2881                         HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
2882                 break;
2883         default:
2884                 PMD_DRV_LOG(ERR,
2885                         "Unsupported link speed %u; default to AUTO\n",
2886                         conf_link_speed);
2887                 break;
2888         }
2889         return eth_link_speed;
2890 }
2891
2892 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2893                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2894                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2895                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \
2896                 ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G)
2897
2898 static int bnxt_validate_link_speed(struct bnxt *bp)
2899 {
2900         uint32_t link_speed = bp->eth_dev->data->dev_conf.link_speeds;
2901         uint16_t port_id = bp->eth_dev->data->port_id;
2902         uint32_t link_speed_capa;
2903         uint32_t one_speed;
2904
2905         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2906                 return 0;
2907
2908         link_speed_capa = bnxt_get_speed_capabilities(bp);
2909
2910         if (link_speed & ETH_LINK_SPEED_FIXED) {
2911                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2912
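                /*
                 * x & (x - 1) clears the lowest set bit; a nonzero result
                 * here means more than one fixed speed was requested.
                 */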
2913                 if (one_speed & (one_speed - 1)) {
2914                         PMD_DRV_LOG(ERR,
2915                                 "Invalid advertised speeds (%u) for port %u\n",
2916                                 link_speed, port_id);
2917                         return -EINVAL;
2918                 }
2919                 if ((one_speed & link_speed_capa) != one_speed) {
2920                         PMD_DRV_LOG(ERR,
2921                                 "Unsupported advertised speed (%u) for port %u\n",
2922                                 link_speed, port_id);
2923                         return -EINVAL;
2924                 }
2925         } else {
2926                 if (!(link_speed & link_speed_capa)) {
2927                         PMD_DRV_LOG(ERR,
2928                                 "Unsupported advertised speeds (%u) for port %u\n",
2929                                 link_speed, port_id);
2930                         return -EINVAL;
2931                 }
2932         }
2933         return 0;
2934 }
2935
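/*
 * Example: a link_speed of ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G below
 * yields AUTO_LINK_SPEED_MASK_1GB | AUTO_LINK_SPEED_MASK_10GB.
 */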
2936 static uint16_t
2937 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2938 {
2939         uint16_t ret = 0;
2940
2941         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2942                 if (bp->link_info->support_speeds)
2943                         return bp->link_info->support_speeds;
2944                 link_speed = BNXT_SUPPORTED_SPEEDS;
2945         }
2946
2947         if (link_speed & ETH_LINK_SPEED_100M)
2948                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2949         if (link_speed & ETH_LINK_SPEED_100M_HD)
2950                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2951         if (link_speed & ETH_LINK_SPEED_1G)
2952                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2953         if (link_speed & ETH_LINK_SPEED_2_5G)
2954                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2955         if (link_speed & ETH_LINK_SPEED_10G)
2956                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2957         if (link_speed & ETH_LINK_SPEED_20G)
2958                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2959         if (link_speed & ETH_LINK_SPEED_25G)
2960                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2961         if (link_speed & ETH_LINK_SPEED_40G)
2962                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2963         if (link_speed & ETH_LINK_SPEED_50G)
2964                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2965         if (link_speed & ETH_LINK_SPEED_100G)
2966                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2967         if (link_speed & ETH_LINK_SPEED_200G)
2968                 ret |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
2969         return ret;
2970 }
2971
2972 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2973 {
2974         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2975
2976         switch (hw_link_speed) {
2977         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2978                 eth_link_speed = ETH_SPEED_NUM_100M;
2979                 break;
2980         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2981                 eth_link_speed = ETH_SPEED_NUM_1G;
2982                 break;
2983         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2984                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2985                 break;
2986         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2987                 eth_link_speed = ETH_SPEED_NUM_10G;
2988                 break;
2989         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2990                 eth_link_speed = ETH_SPEED_NUM_20G;
2991                 break;
2992         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2993                 eth_link_speed = ETH_SPEED_NUM_25G;
2994                 break;
2995         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2996                 eth_link_speed = ETH_SPEED_NUM_40G;
2997                 break;
2998         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2999                 eth_link_speed = ETH_SPEED_NUM_50G;
3000                 break;
3001         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
3002                 eth_link_speed = ETH_SPEED_NUM_100G;
3003                 break;
3004         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
3005                 eth_link_speed = ETH_SPEED_NUM_200G;
3006                 break;
3007         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
3008         default:
3009                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
3010                         hw_link_speed);
3011                 break;
3012         }
3013         return eth_link_speed;
3014 }
3015
3016 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
3017 {
3018         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
3019
3020         switch (hw_link_duplex) {
3021         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
3022         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
3023                 /* FALLTHROUGH */
3024                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
3025                 break;
3026         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
3027                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
3028                 break;
3029         default:
3030                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
3031                         hw_link_duplex);
3032                 break;
3033         }
3034         return eth_link_duplex;
3035 }
3036
3037 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
3038 {
3039         int rc = 0;
3040         struct bnxt_link_info *link_info = bp->link_info;
3041
3042         rc = bnxt_hwrm_port_phy_qcaps(bp);
3043         if (rc)
3044                 PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc);
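        /* A phy_qcaps failure is logged but not fatal; qcfg still runs. */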
3045
3046         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
3047         if (rc) {
3048                 PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc);
3049                 goto exit;
3050         }
3051
3052         if (link_info->link_speed)
3053                 link->link_speed =
3054                         bnxt_parse_hw_link_speed(link_info->link_speed);
3055         else
3056                 link->link_speed = ETH_SPEED_NUM_NONE;
3057         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
3058         link->link_status = link_info->link_up;
3059         link->link_autoneg = link_info->auto_mode ==
3060                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
3061                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
3062 exit:
3063         return rc;
3064 }
3065
3066 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
3067 {
3068         int rc = 0;
3069         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
3070         struct bnxt_link_info link_req;
3071         uint16_t speed, autoneg;
3072
3073         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
3074                 return 0;
3075
3076         rc = bnxt_validate_link_speed(bp);
3077         if (rc)
3078                 goto error;
3079
3080         memset(&link_req, 0, sizeof(link_req));
3081         link_req.link_up = link_up;
3082         if (!link_up)
3083                 goto port_phy_cfg;
3084
3085         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
3086         if (BNXT_CHIP_P5(bp) &&
3087             dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
3088                 /* 40G is not supported as part of media auto detect.
3089                  * The speed should be forced and autoneg disabled
3090                  * to configure 40G speed.
3091                  */
3092                 PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n");
3093                 autoneg = 0;
3094         }
3095
3096         /* FW reports no autoneg speeds, NRZ or PAM4; disable autoneg */
3097         if (bp->link_info->auto_link_speed == 0 &&
3098             bp->link_info->link_signal_mode &&
3099             bp->link_info->auto_pam4_link_speeds == 0)
3100                 autoneg = 0;
3101
3102         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds,
3103                                           bp->link_info->link_signal_mode);
3104         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
3105         /* Autoneg can be used only when the FW allows it.
3106          * When the user configures a fixed speed of 40G and later changes
3107          * to any other speed, auto_link_speed/force_link_speed is still
3108          * set to 40G until the link comes up at the new speed.
3109          */
3110         if (autoneg == 1 &&
3111             !(!BNXT_CHIP_P5(bp) &&
3112               (bp->link_info->auto_link_speed ||
3113                bp->link_info->force_link_speed))) {
3114                 link_req.phy_flags |=
3115                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
3116                 link_req.auto_link_speed_mask =
3117                         bnxt_parse_eth_link_speed_mask(bp,
3118                                                        dev_conf->link_speeds);
3119         } else {
3120                 if (bp->link_info->phy_type ==
3121                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
3122                     bp->link_info->phy_type ==
3123                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
3124                     bp->link_info->media_type ==
3125                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
3126                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
3127                         return -EINVAL;
3128                 }
3129
3130                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
3131                 /* If user wants a particular speed try that first. */
3132                 if (speed)
3133                         link_req.link_speed = speed;
3134                 else if (bp->link_info->force_pam4_link_speed)
3135                         link_req.link_speed =
3136                                 bp->link_info->force_pam4_link_speed;
3137                 else if (bp->link_info->auto_pam4_link_speeds)
3138                         link_req.link_speed =
3139                                 bp->link_info->auto_pam4_link_speeds;
3140                 else if (bp->link_info->support_pam4_speeds)
3141                         link_req.link_speed =
3142                                 bp->link_info->support_pam4_speeds;
3143                 else if (bp->link_info->force_link_speed)
3144                         link_req.link_speed = bp->link_info->force_link_speed;
3145                 else
3146                         link_req.link_speed = bp->link_info->auto_link_speed;
3147                 /* Auto PAM4 link speed is zero, but auto_link_speed is not
3148                  * zero. Use the auto_link_speed.
3149                  */
3150                 if (bp->link_info->auto_link_speed != 0 &&
3151                     bp->link_info->auto_pam4_link_speeds == 0)
3152                         link_req.link_speed = bp->link_info->auto_link_speed;
3153         }
3154         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
3155         link_req.auto_pause = bp->link_info->auto_pause;
3156         link_req.force_pause = bp->link_info->force_pause;
3157
3158 port_phy_cfg:
3159         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
3160         if (rc) {
3161                 PMD_DRV_LOG(ERR,
3162                         "Set link config failed with rc %d\n", rc);
3163         }
3164
3165 error:
3166         return rc;
3167 }
3168
3169 /* JIRA 22088 */
3170 int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
3171 {
3172         struct hwrm_func_qcfg_input req = {0};
3173         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3174         uint16_t flags;
3175         uint16_t svif_info;
3176         int rc = 0;
3177         bp->func_svif = BNXT_SVIF_INVALID;
3178
3179         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
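        /* An fid of 0xffff addresses the function issuing the command. */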
3180         req.fid = rte_cpu_to_le_16(0xffff);
3181
3182         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3183
3184         HWRM_CHECK_RESULT();
3185
3186         /* Hard-coded 12-bit VLAN ID mask (0xfff) */
3187         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
3188
3189         svif_info = rte_le_to_cpu_16(resp->svif_info);
3190         if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
3191                 bp->func_svif = svif_info &
3192                                      HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
3193
3194         flags = rte_le_to_cpu_16(resp->flags);
3195         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
3196                 bp->flags |= BNXT_FLAG_MULTI_HOST;
3197
3198         if (BNXT_VF(bp) &&
3199             !BNXT_VF_IS_TRUSTED(bp) &&
3200             (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3201                 bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
3202                 PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
3203         } else if (BNXT_VF(bp) &&
3204                    BNXT_VF_IS_TRUSTED(bp) &&
3205                    !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3206                 bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
3207                 PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
3208         }
3209
3210         if (mtu)
3211                 *mtu = rte_le_to_cpu_16(resp->mtu);
3212
3213         switch (resp->port_partition_type) {
3214         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
3215         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
3216         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
3217                 /* FALLTHROUGH */
3218                 bp->flags |= BNXT_FLAG_NPAR_PF;
3219                 break;
3220         default:
3221                 bp->flags &= ~BNXT_FLAG_NPAR_PF;
3222                 break;
3223         }
3224
3225         bp->legacy_db_size =
3226                 rte_le_to_cpu_16(resp->legacy_l2_db_size_kb) * 1024;
3227
3228         HWRM_UNLOCK();
3229
3230         return rc;
3231 }
3232
3233 int bnxt_hwrm_parent_pf_qcfg(struct bnxt *bp)
3234 {
3235         struct hwrm_func_qcfg_input req = {0};
3236         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3237         int rc;
3238
3239         if (!BNXT_VF_IS_TRUSTED(bp))
3240                 return 0;
3241
3242         if (!bp->parent)
3243                 return -EINVAL;
3244
3245         bp->parent->fid = BNXT_PF_FID_INVALID;
3246
3247         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3248
3249         req.fid = rte_cpu_to_le_16(0xfffe); /* Request parent PF information. */
3250
3251         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3252
3253         HWRM_CHECK_RESULT();
3254
3255         memcpy(bp->parent->mac_addr, resp->mac_address, RTE_ETHER_ADDR_LEN);
3256         bp->parent->vnic = rte_le_to_cpu_16(resp->dflt_vnic_id);
3257         bp->parent->fid = rte_le_to_cpu_16(resp->fid);
3258         bp->parent->port_id = rte_le_to_cpu_16(resp->port_id);
3259
3260         /* FIXME: Temporary workaround - remove when firmware issue is fixed. */
3261         if (bp->parent->vnic == 0) {
3262                 PMD_DRV_LOG(ERR, "Error: parent VNIC unavailable.\n");
3263                 /* Use hard-coded values appropriate for current Wh+ fw. */
3264                 if (bp->parent->fid == 2)
3265                         bp->parent->vnic = 0x100;
3266                 else
3267                         bp->parent->vnic = 1;
3268         }
3269
3270         HWRM_UNLOCK();
3271
3272         return 0;
3273 }
3274
3275 int bnxt_hwrm_get_dflt_vnic_svif(struct bnxt *bp, uint16_t fid,
3276                                  uint16_t *vnic_id, uint16_t *svif)
3277 {
3278         struct hwrm_func_qcfg_input req = {0};
3279         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3280         uint16_t svif_info;
3281         int rc = 0;
3282
3283         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3284         req.fid = rte_cpu_to_le_16(fid);
3285
3286         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3287
3288         HWRM_CHECK_RESULT();
3289
3290         if (vnic_id)
3291                 *vnic_id = rte_le_to_cpu_16(resp->dflt_vnic_id);
3292
3293         svif_info = rte_le_to_cpu_16(resp->svif_info);
3294         if (svif && (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID))
3295                 *svif = svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
3296
3297         HWRM_UNLOCK();
3298
3299         return rc;
3300 }
3301
3302 int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp)
3303 {
3304         struct hwrm_port_mac_qcfg_input req = {0};
3305         struct hwrm_port_mac_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3306         uint16_t port_svif_info;
3307         int rc;
3308
3309         bp->port_svif = BNXT_SVIF_INVALID;
3310
3311         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
3312                 return 0;
3313
3314         HWRM_PREP(&req, HWRM_PORT_MAC_QCFG, BNXT_USE_CHIMP_MB);
3315
3316         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3317
3318         HWRM_CHECK_RESULT_SILENT();
3319
3320         port_svif_info = rte_le_to_cpu_16(resp->port_svif_info);
3321         if (port_svif_info &
3322             HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_VALID)
3323                 bp->port_svif = port_svif_info &
3324                         HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_MASK;
3325
3326         HWRM_UNLOCK();
3327
3328         return 0;
3329 }
3330
3331 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,
3332                                  struct bnxt_pf_resource_info *pf_resc)
3333 {
3334         struct hwrm_func_cfg_input req = {0};
3335         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3336         uint32_t enables;
3337         int rc;
3338
3339         enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
3340                   HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3341                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3342                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3343                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3344                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3345                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3346                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3347                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;
3348
3349         if (BNXT_HAS_RING_GRPS(bp)) {
3350                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
3351                 req.num_hw_ring_grps =
3352                         rte_cpu_to_le_16(pf_resc->num_hw_ring_grps);
3353         } else if (BNXT_HAS_NQ(bp)) {
3354                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
3355                 req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
3356         }
3357
3358         req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
3359         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
3360         req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3361         req.num_rsscos_ctxs = rte_cpu_to_le_16(pf_resc->num_rsscos_ctxs);
3362         req.num_stat_ctxs = rte_cpu_to_le_16(pf_resc->num_stat_ctxs);
3363         req.num_cmpl_rings = rte_cpu_to_le_16(pf_resc->num_cp_rings);
3364         req.num_tx_rings = rte_cpu_to_le_16(pf_resc->num_tx_rings);
3365         req.num_rx_rings = rte_cpu_to_le_16(pf_resc->num_rx_rings);
3366         req.num_l2_ctxs = rte_cpu_to_le_16(pf_resc->num_l2_ctxs);
3367         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
3368         req.fid = rte_cpu_to_le_16(0xffff);
3369         req.enables = rte_cpu_to_le_32(enables);
3370
3371         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3372
3373         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3374
3375         HWRM_CHECK_RESULT();
3376         HWRM_UNLOCK();
3377
3378         return rc;
3379 }
3380
3381 /* min values are the guaranteed resources and max values are subject
3382  * to availability. The strategy for now is to keep both min & max
3383  * values the same.
3384  */
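/*
 * Example: with bp->max_tx_rings = 128 and num_vfs = 7, each of the
 * eight functions (PF plus seven VFs) is offered 128 / (7 + 1) = 16 TX
 * rings, and min == max makes that allocation fully guaranteed.
 */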
3385 static void
3386 bnxt_fill_vf_func_cfg_req_new(struct bnxt *bp,
3387                               struct hwrm_func_vf_resource_cfg_input *req,
3388                               int num_vfs)
3389 {
3390         req->max_rsscos_ctx = rte_cpu_to_le_16(bp->max_rsscos_ctx /
3391                                                (num_vfs + 1));
3392         req->min_rsscos_ctx = req->max_rsscos_ctx;
3393         req->max_stat_ctx = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
3394         req->min_stat_ctx = req->max_stat_ctx;
3395         req->max_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
3396                                                (num_vfs + 1));
3397         req->min_cmpl_rings = req->max_cmpl_rings;
3398         req->max_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
3399         req->min_tx_rings = req->max_tx_rings;
3400         req->max_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
3401         req->min_rx_rings = req->max_rx_rings;
3402         req->max_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
3403         req->min_l2_ctxs = req->max_l2_ctxs;
3404         /* TODO: For now, do not support VMDq/RFS on VFs. */
3405         req->max_vnics = rte_cpu_to_le_16(1);
3406         req->min_vnics = req->max_vnics;
3407         req->max_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
3408                                                  (num_vfs + 1));
3409         req->min_hw_ring_grps = req->max_hw_ring_grps;
3410         req->flags =
3411          rte_cpu_to_le_16(HWRM_FUNC_VF_RESOURCE_CFG_INPUT_FLAGS_MIN_GUARANTEED);
3412 }
3413
3414 static void
3415 bnxt_fill_vf_func_cfg_req_old(struct bnxt *bp,
3416                               struct hwrm_func_cfg_input *req,
3417                               int num_vfs)
3418 {
3419         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
3420                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3421                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3422                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3423                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3424                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3425                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3426                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3427                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
3428                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
3429
3430         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
3431                                     RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
3432                                     BNXT_NUM_VLANS);
3433         req->mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3434         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
3435                                                 (num_vfs + 1));
3436         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
3437         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
3438                                                (num_vfs + 1));
3439         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
3440         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
3441         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
3442         /* TODO: For now, do not support VMDq/RFS on VFs. */
3443         req->num_vnics = rte_cpu_to_le_16(1);
3444         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
3445                                                  (num_vfs + 1));
3446 }
3447
3448 /* Update the port-wide resource values based on how many resources
3449  * got allocated to the VF.
3450  */
3451 static int bnxt_update_max_resources(struct bnxt *bp,
3452                                      int vf)
3453 {
3454         struct hwrm_func_qcfg_input req = {0};
3455         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3456         int rc;
3457
3458         /* Get the actual allocated values now */
3459         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3460         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3461         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3462         HWRM_CHECK_RESULT();
3463
3464         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
3465         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->alloc_stat_ctx);
3466         bp->max_cp_rings -= rte_le_to_cpu_16(resp->alloc_cmpl_rings);
3467         bp->max_tx_rings -= rte_le_to_cpu_16(resp->alloc_tx_rings);
3468         bp->max_rx_rings -= rte_le_to_cpu_16(resp->alloc_rx_rings);
3469         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->alloc_l2_ctx);
3470         bp->max_ring_grps -= rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
3471
3472         HWRM_UNLOCK();
3473
3474         return 0;
3475 }
3476
3477 /* Update the PF resource values based on how many resources
3478  * got allocated to it.
3479  */
3480 static int bnxt_update_max_resources_pf_only(struct bnxt *bp)
3481 {
3482         struct hwrm_func_qcfg_input req = {0};
3483         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3484         int rc;
3485
3486         /* Get the actual allocated values now */
3487         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3488         req.fid = rte_cpu_to_le_16(0xffff);
3489         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3490         HWRM_CHECK_RESULT();
3491
3492         bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
3493         bp->max_stat_ctx = rte_le_to_cpu_16(resp->alloc_stat_ctx);
3494         bp->max_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
3495         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
3496         bp->max_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
3497         bp->max_l2_ctx = rte_le_to_cpu_16(resp->alloc_l2_ctx);
3498         bp->max_ring_grps = rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
3499         bp->max_vnics = rte_le_to_cpu_16(resp->alloc_vnics);
3500
3501         HWRM_UNLOCK();
3502
3503         return 0;
3504 }
3505
3506 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
3507 {
3508         struct hwrm_func_qcfg_input req = {0};
3509         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3510         int rc;
3511
3512         /* Query the VLAN currently configured for this VF */
3513         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3514         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3515         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3516         HWRM_CHECK_RESULT();
3517         rc = rte_le_to_cpu_16(resp->vlan);
3518
3519         HWRM_UNLOCK();
3520
3521         return rc;
3522 }
3523
3524 static int bnxt_query_pf_resources(struct bnxt *bp,
3525                                    struct bnxt_pf_resource_info *pf_resc)
3526 {
3527         struct hwrm_func_qcfg_input req = {0};
3528         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3529         int rc;
3530
3531         /* Query the allocated resources and copy them into the PF struct */
3532         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3533         req.fid = rte_cpu_to_le_16(0xffff);
3534         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3535         HWRM_CHECK_RESULT();
3536
3537         pf_resc->num_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
3538         pf_resc->num_rsscos_ctxs = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
3539         pf_resc->num_stat_ctxs = rte_le_to_cpu_16(resp->alloc_stat_ctx);
3540         pf_resc->num_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
3541         pf_resc->num_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
3542         pf_resc->num_l2_ctxs = rte_le_to_cpu_16(resp->alloc_l2_ctx);
3543         pf_resc->num_hw_ring_grps = rte_le_to_cpu_32(resp->alloc_hw_ring_grps);
3544         bp->pf->evb_mode = resp->evb_mode;
3545
3546         HWRM_UNLOCK();
3547
3548         return rc;
3549 }
3550
3551 static void
3552 bnxt_calculate_pf_resources(struct bnxt *bp,
3553                             struct bnxt_pf_resource_info *pf_resc,
3554                             int num_vfs)
3555 {
3556         if (!num_vfs) {
3557                 pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx;
3558                 pf_resc->num_stat_ctxs = bp->max_stat_ctx;
3559                 pf_resc->num_cp_rings = bp->max_cp_rings;
3560                 pf_resc->num_tx_rings = bp->max_tx_rings;
3561                 pf_resc->num_rx_rings = bp->max_rx_rings;
3562                 pf_resc->num_l2_ctxs = bp->max_l2_ctx;
3563                 pf_resc->num_hw_ring_grps = bp->max_ring_grps;
3564
3565                 return;
3566         }
3567
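        /*
         * Split each pool evenly across the PF and VFs; the PF also keeps
         * the remainder. E.g. 130 RX rings and num_vfs = 7 leave the PF
         * 130 / 8 + 130 % 8 = 16 + 2 = 18 rings.
         */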
3568         pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx / (num_vfs + 1) +
3569                                    bp->max_rsscos_ctx % (num_vfs + 1);
3570         pf_resc->num_stat_ctxs = bp->max_stat_ctx / (num_vfs + 1) +
3571                                  bp->max_stat_ctx % (num_vfs + 1);
3572         pf_resc->num_cp_rings = bp->max_cp_rings / (num_vfs + 1) +
3573                                 bp->max_cp_rings % (num_vfs + 1);
3574         pf_resc->num_tx_rings = bp->max_tx_rings / (num_vfs + 1) +
3575                                 bp->max_tx_rings % (num_vfs + 1);
3576         pf_resc->num_rx_rings = bp->max_rx_rings / (num_vfs + 1) +
3577                                 bp->max_rx_rings % (num_vfs + 1);
3578         pf_resc->num_l2_ctxs = bp->max_l2_ctx / (num_vfs + 1) +
3579                                bp->max_l2_ctx % (num_vfs + 1);
3580         pf_resc->num_hw_ring_grps = bp->max_ring_grps / (num_vfs + 1) +
3581                                     bp->max_ring_grps % (num_vfs + 1);
3582 }
3583
3584 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
3585 {
3586         struct bnxt_pf_resource_info pf_resc = { 0 };
3587         int rc;
3588
3589         if (!BNXT_PF(bp)) {
3590                 PMD_DRV_LOG(ERR, "Attempt to allocate PF resources on a VF!\n");
3591                 return -EINVAL;
3592         }
3593
3594         rc = bnxt_hwrm_func_qcaps(bp);
3595         if (rc)
3596                 return rc;
3597
3598         bnxt_calculate_pf_resources(bp, &pf_resc, 0);
3599
3600         bp->pf->func_cfg_flags &=
3601                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3602                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3603         bp->pf->func_cfg_flags |=
3604                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
3605
3606         rc = bnxt_hwrm_pf_func_cfg(bp, &pf_resc);
3607         if (rc)
3608                 return rc;
3609
3610         rc = bnxt_update_max_resources_pf_only(bp);
3611
3612         return rc;
3613 }
3614
3615 static int
3616 bnxt_configure_vf_req_buf(struct bnxt *bp, int num_vfs)
3617 {
3618         size_t req_buf_sz, sz;
3619         int i, rc;
3620
3621         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
3622         bp->pf->vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
3623                 page_roundup(req_buf_sz));
3624         if (bp->pf->vf_req_buf == NULL)
3625                 return -ENOMEM;
3627
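        /* Fault in and lock each backing page so it cannot be swapped out. */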
3628         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
3629                 rte_mem_lock_page(((char *)bp->pf->vf_req_buf) + sz);
3630
3631         for (i = 0; i < num_vfs; i++)
3632                 bp->pf->vf_info[i].req_buf = ((char *)bp->pf->vf_req_buf) +
3633                                              (i * HWRM_MAX_REQ_LEN);
3634
3635         rc = bnxt_hwrm_func_buf_rgtr(bp, num_vfs);
3636         if (rc)
3637                 rte_free(bp->pf->vf_req_buf);
3638
3639         return rc;
3640 }
3641
3642 static int
3643 bnxt_process_vf_resc_config_new(struct bnxt *bp, int num_vfs)
3644 {
3645         struct hwrm_func_vf_resource_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3646         struct hwrm_func_vf_resource_cfg_input req = {0};
3647         int i, rc = 0;
3648
3649         bnxt_fill_vf_func_cfg_req_new(bp, &req, num_vfs);
3650         bp->pf->active_vfs = 0;
3651         for (i = 0; i < num_vfs; i++) {
3652                 HWRM_PREP(&req, HWRM_FUNC_VF_RESOURCE_CFG, BNXT_USE_CHIMP_MB);
3653                 req.vf_id = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
3654                 rc = bnxt_hwrm_send_message(bp,
3655                                             &req,
3656                                             sizeof(req),
3657                                             BNXT_USE_CHIMP_MB);
3658                 if (rc || resp->error_code) {
3659                         PMD_DRV_LOG(ERR,
3660                                 "Failed to initialize VF %d\n", i);
3661                         PMD_DRV_LOG(ERR,
3662                                 "Not all VFs available. (%d, %d)\n",
3663                                 rc, resp->error_code);
3664                         HWRM_UNLOCK();
3665
3666                         /* If the first VF configuration itself fails,
3667                          * unregister the vf_fwd_request buffer.
3668                          */
3669                         if (i == 0)
3670                                 bnxt_hwrm_func_buf_unrgtr(bp);
3671                         break;
3672                 }
3673                 HWRM_UNLOCK();
3674
3675                 /* Update the max resource values based on the resource values
3676                  * allocated to the VF.
3677                  */
3678                 bnxt_update_max_resources(bp, i);
3679                 bp->pf->active_vfs++;
3680                 bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
3681         }
3682
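        /*
         * Per-VF failures are logged above and reflected in
         * bp->pf->active_vfs; they are not propagated to the caller.
         */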
3683         return 0;
3684 }
3685
3686 static int
3687 bnxt_process_vf_resc_config_old(struct bnxt *bp, int num_vfs)
3688 {
3689         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3690         struct hwrm_func_cfg_input req = {0};
3691         int i, rc;
3692
3693         bnxt_fill_vf_func_cfg_req_old(bp, &req, num_vfs);
3694
3695         bp->pf->active_vfs = 0;
3696         for (i = 0; i < num_vfs; i++) {
3697                 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3698                 req.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags);
3699                 req.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
3700                 rc = bnxt_hwrm_send_message(bp,
3701                                             &req,
3702                                             sizeof(req),
3703                                             BNXT_USE_CHIMP_MB);
3704
3705                 /* Clear enable flag for next pass */
3706                 req.enables &= ~rte_cpu_to_le_32(
3707                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3708
3709                 if (rc || resp->error_code) {
3710                         PMD_DRV_LOG(ERR,
3711                                 "Failed to initialize VF %d\n", i);
3712                         PMD_DRV_LOG(ERR,
3713                                 "Not all VFs available. (%d, %d)\n",
3714                                 rc, resp->error_code);
3715                         HWRM_UNLOCK();
3716
3717                         /* If the first VF configuration itself fails,
3718                          * unregister the vf_fwd_request buffer.
3719                          */
3720                         if (i == 0)
3721                                 bnxt_hwrm_func_buf_unrgtr(bp);
3722                         break;
3723                 }
3724
3725                 HWRM_UNLOCK();
3726
3727                 /* Update the max resource values based on the resource values
3728                  * allocated to the VF.
3729                  */
3730                 bnxt_update_max_resources(bp, i);
3731                 bp->pf->active_vfs++;
3732                 bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
3733         }
3734
3735         return 0;
3736 }
3737
3738 static void
3739 bnxt_configure_vf_resources(struct bnxt *bp, int num_vfs)
3740 {
3741         if (bp->flags & BNXT_FLAG_NEW_RM)
3742                 bnxt_process_vf_resc_config_new(bp, num_vfs);
3743         else
3744                 bnxt_process_vf_resc_config_old(bp, num_vfs);
3745 }
3746
3747 static void
3748 bnxt_update_pf_resources(struct bnxt *bp,
3749                          struct bnxt_pf_resource_info *pf_resc)
3750 {
3751         bp->max_rsscos_ctx = pf_resc->num_rsscos_ctxs;
3752         bp->max_stat_ctx = pf_resc->num_stat_ctxs;
3753         bp->max_cp_rings = pf_resc->num_cp_rings;
3754         bp->max_tx_rings = pf_resc->num_tx_rings;
3755         bp->max_rx_rings = pf_resc->num_rx_rings;
3756         bp->max_ring_grps = pf_resc->num_hw_ring_grps;
3757 }
3758
3759 static int32_t
3760 bnxt_configure_pf_resources(struct bnxt *bp,
3761                             struct bnxt_pf_resource_info *pf_resc)
3762 {
3763         /*
3764          * We're using STD_TX_RING_MODE here which will limit the TX
3765          * rings. This will allow QoS to function properly. Not setting this
3766          * will cause PF rings to break bandwidth settings.
3767          */
3768         bp->pf->func_cfg_flags &=
3769                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3770                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3771         bp->pf->func_cfg_flags |=
3772                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
3773         return bnxt_hwrm_pf_func_cfg(bp, pf_resc);
3774 }
3775
3776 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
3777 {
3778         struct bnxt_pf_resource_info pf_resc = { 0 };
3779         int rc;
3780
3781         if (!BNXT_PF(bp)) {
3782                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3783                 return -EINVAL;
3784         }
3785
3786         rc = bnxt_hwrm_func_qcaps(bp);
3787         if (rc)
3788                 return rc;
3789
3790         bnxt_calculate_pf_resources(bp, &pf_resc, num_vfs);
3791
3792         rc = bnxt_configure_pf_resources(bp, &pf_resc);
3793         if (rc)
3794                 return rc;
3795
3796         rc = bnxt_query_pf_resources(bp, &pf_resc);
3797         if (rc)
3798                 return rc;
3799
3800         /*
3801          * Now, create and register a buffer to hold forwarded VF requests
3802          */
3803         rc = bnxt_configure_vf_req_buf(bp, num_vfs);
3804         if (rc)
3805                 return rc;
3806
3807         bnxt_configure_vf_resources(bp, num_vfs);
3808
3809         bnxt_update_pf_resources(bp, &pf_resc);
3810
3811         return 0;
3812 }
3813
3814 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
3815 {
3816         struct hwrm_func_cfg_input req = {0};
3817         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3818         int rc;
3819
3820         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3821
3822         req.fid = rte_cpu_to_le_16(0xffff);
3823         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
3824         req.evb_mode = bp->pf->evb_mode;
3825
3826         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3827         HWRM_CHECK_RESULT();
3828         HWRM_UNLOCK();
3829
3830         return rc;
3831 }
3832
3833 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
3834                                 uint8_t tunnel_type)
3835 {
3836         struct hwrm_tunnel_dst_port_alloc_input req = {0};
3837         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3838         int rc = 0;
3839
3840         HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
3841         req.tunnel_type = tunnel_type;
3842         req.tunnel_dst_port_val = rte_cpu_to_be_16(port);
3843         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3844         HWRM_CHECK_RESULT();
3845
3846         switch (tunnel_type) {
3847         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
3848                 bp->vxlan_fw_dst_port_id =
3849                         rte_le_to_cpu_16(resp->tunnel_dst_port_id);
3850                 bp->vxlan_port = port;
3851                 break;
3852         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
3853                 bp->geneve_fw_dst_port_id =
3854                         rte_le_to_cpu_16(resp->tunnel_dst_port_id);
3855                 bp->geneve_port = port;
3856                 break;
3857         default:
3858                 break;
3859         }
3860
3861         HWRM_UNLOCK();
3862
3863         return rc;
3864 }
3865
3866 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
3867                                 uint8_t tunnel_type)
3868 {
3869         struct hwrm_tunnel_dst_port_free_input req = {0};
3870         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
3871         int rc = 0;
3872
3873         HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
3874
3875         req.tunnel_type = tunnel_type;
3876         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
3877         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3878
3879         HWRM_CHECK_RESULT();
3880         HWRM_UNLOCK();
3881
3882         if (tunnel_type ==
3883             HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN) {
3884                 bp->vxlan_port = 0;
3885                 bp->vxlan_port_cnt = 0;
3886         }
3887
3888         if (tunnel_type ==
3889             HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE) {
3890                 bp->geneve_port = 0;
3891                 bp->geneve_port_cnt = 0;
3892         }
3893
3894         return rc;
3895 }
3896
3897 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
3898                                         uint32_t flags)
3899 {
3900         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3901         struct hwrm_func_cfg_input req = {0};
3902         int rc;
3903
3904         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3905
3906         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3907         req.flags = rte_cpu_to_le_32(flags);
3908         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3909
3910         HWRM_CHECK_RESULT();
3911         HWRM_UNLOCK();
3912
3913         return rc;
3914 }
3915
3916 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
3917 {
3918         uint32_t *flag = flagp;
3919
3920         vnic->flags = *flag;
3921 }
3922
3923 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3924 {
3925         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
3926 }
3927
3928 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp, int num_vfs)
3929 {
3930         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
3931         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
3932         int rc;
3933
3934         HWRM_PREP(&req, HWRM_FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
3935
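        /* req_buf_page_size carries the page size as a log2 exponent. */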
3936         req.req_buf_num_pages = rte_cpu_to_le_16(1);
3937         req.req_buf_page_size =
3938                 rte_cpu_to_le_16(page_getenum(num_vfs * HWRM_MAX_REQ_LEN));
3939         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
3940         req.req_buf_page_addr0 =
3941                 rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf));
3942         if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
3943                 PMD_DRV_LOG(ERR,
3944                         "unable to map buffer address to physical memory\n");
3945                 HWRM_UNLOCK();
3946                 return -ENOMEM;
3947         }
3948
3949         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3950
3951         HWRM_CHECK_RESULT();
3952         HWRM_UNLOCK();
3953
3954         return rc;
3955 }
3956
3957 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
3958 {
3959         int rc = 0;
3960         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
3961         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
3962
3963         if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
3964                 return 0;
3965
3966         HWRM_PREP(&req, HWRM_FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
3967
3968         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3969
3970         HWRM_CHECK_RESULT();
3971         HWRM_UNLOCK();
3972
3973         return rc;
3974 }
3975
3976 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
3977 {
3978         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3979         struct hwrm_func_cfg_input req = {0};
3980         int rc;
3981
3982         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3983
3984         req.fid = rte_cpu_to_le_16(0xffff);
3985         req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
3986         req.enables = rte_cpu_to_le_32(
3987                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3988         req.async_event_cr = rte_cpu_to_le_16(
3989                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3990         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3991
3992         HWRM_CHECK_RESULT();
3993         HWRM_UNLOCK();
3994
3995         return rc;
3996 }
3997
3998 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
3999 {
4000         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4001         struct hwrm_func_vf_cfg_input req = {0};
4002         int rc;
4003
4004         HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
4005
4006         req.enables = rte_cpu_to_le_32(
4007                         HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
4008         req.async_event_cr = rte_cpu_to_le_16(
4009                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
4010         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4011
4012         HWRM_CHECK_RESULT();
4013         HWRM_UNLOCK();
4014
4015         return rc;
4016 }
4017
4018 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
4019 {
4020         struct hwrm_func_cfg_input req = {0};
4021         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4022         uint16_t dflt_vlan, fid;
4023         uint32_t func_cfg_flags;
4024         int rc = 0;
4025
4026         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4027
4028         if (is_vf) {
4029                 dflt_vlan = bp->pf->vf_info[vf].dflt_vlan;
4030                 fid = bp->pf->vf_info[vf].fid;
4031                 func_cfg_flags = bp->pf->vf_info[vf].func_cfg_flags;
4032         } else {
4033                 fid = rte_cpu_to_le_16(0xffff);
4034                 func_cfg_flags = bp->pf->func_cfg_flags;
4035                 dflt_vlan = bp->vlan;
4036         }
4037
4038         req.flags = rte_cpu_to_le_32(func_cfg_flags);
4039         req.fid = rte_cpu_to_le_16(fid);
4040         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
4041         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
4042
4043         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4044
4045         HWRM_CHECK_RESULT();
4046         HWRM_UNLOCK();
4047
4048         return rc;
4049 }
4050
4051 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
4052                         uint16_t max_bw, uint16_t enables)
4053 {
4054         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4055         struct hwrm_func_cfg_input req = {0};
4056         int rc;
4057
4058         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4059
4060         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4061         req.enables |= rte_cpu_to_le_32(enables);
4062         req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
4063         req.max_bw = rte_cpu_to_le_32(max_bw);
4064         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4065
4066         HWRM_CHECK_RESULT();
4067         HWRM_UNLOCK();
4068
4069         return rc;
4070 }
4071
4072 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
4073 {
4074         struct hwrm_func_cfg_input req = {0};
4075         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4076         int rc = 0;
4077
4078         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4079
4080         req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
4081         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4082         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
4083         req.dflt_vlan = rte_cpu_to_le_16(bp->pf->vf_info[vf].dflt_vlan);
4084
4085         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4086
4087         HWRM_CHECK_RESULT();
4088         HWRM_UNLOCK();
4089
4090         return rc;
4091 }
4092
4093 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
4094 {
4095         int rc;
4096
4097         if (BNXT_PF(bp))
4098                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
4099         else
4100                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
4101
4102         return rc;
4103 }
4104
4105 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
4106                               void *encaped, size_t ec_size)
4107 {
4108         int rc = 0;
4109         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
4110         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
4111
4112         if (ec_size > sizeof(req.encap_request))
		return -EINVAL;
4114
4115         HWRM_PREP(&req, HWRM_REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
4116
4117         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
4118         memcpy(req.encap_request, encaped, ec_size);
4119
4120         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4121
4122         HWRM_CHECK_RESULT();
4123         HWRM_UNLOCK();
4124
4125         return rc;
4126 }
4127
4128 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
4129                                        struct rte_ether_addr *mac)
4130 {
4131         struct hwrm_func_qcfg_input req = {0};
4132         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4133         int rc;
4134
4135         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
4136
4137         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4138         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4139
4140         HWRM_CHECK_RESULT();
4141
4142         memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);
4143
4144         HWRM_UNLOCK();
4145
4146         return rc;
4147 }
4148
4149 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
4150                             void *encaped, size_t ec_size)
4151 {
4152         int rc = 0;
4153         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
4154         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
4155
4156         if (ec_size > sizeof(req.encap_request))
		return -EINVAL;
4158
4159         HWRM_PREP(&req, HWRM_EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
4160
4161         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
4162         memcpy(req.encap_request, encaped, ec_size);
4163
4164         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4165
4166         HWRM_CHECK_RESULT();
4167         HWRM_UNLOCK();
4168
4169         return rc;
4170 }
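
/*
 * Illustrative sketch (not a verbatim driver path): a PF servicing a
 * request forwarded from a VF either executes it on the VF's behalf or
 * rejects it. "allowed", "fwd_cmd" and "req_len" are hypothetical names.
 *
 *	if (allowed)
 *		rc = bnxt_hwrm_exec_fwd_resp(bp, fw_fid, fwd_cmd, req_len);
 *	else
 *		rc = bnxt_hwrm_reject_fwd_resp(bp, fw_fid, fwd_cmd, req_len);
 */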
4171
4172 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
4173                          struct rte_eth_stats *stats, uint8_t rx)
4174 {
4175         int rc = 0;
4176         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
4177         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
4178
4179         HWRM_PREP(&req, HWRM_STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
4180
4181         req.stat_ctx_id = rte_cpu_to_le_32(cid);
4182
4183         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4184
4185         HWRM_CHECK_RESULT();
4186
4187         if (rx) {
4188                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
4189                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
4190                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
4191                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
4192                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
4193                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
4194                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_discard_pkts);
4195                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_error_pkts);
4196         } else {
4197                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
4198                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
4199                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
4200                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
4201                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
4202                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
4203         }
4204
4205         HWRM_UNLOCK();
4206
4207         return rc;
4208 }
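
/*
 * Sketch of the intended use from a stats-get path (a minimal example,
 * assuming each RX queue carries its completion ring's HW stats
 * context ID):
 *
 *	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
 *		struct bnxt_cp_ring_info *cpr = bp->rx_queues[i]->cp_ring;
 *
 *		rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i,
 *					  stats, 1);
 *		if (rc)
 *			break;
 *	}
 */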
4209
4210 int bnxt_hwrm_port_qstats(struct bnxt *bp)
4211 {
4212         struct hwrm_port_qstats_input req = {0};
4213         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
4214         struct bnxt_pf_info *pf = bp->pf;
4215         int rc;
4216
4217         HWRM_PREP(&req, HWRM_PORT_QSTATS, BNXT_USE_CHIMP_MB);
4218
4219         req.port_id = rte_cpu_to_le_16(pf->port_id);
4220         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
4221         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
4222         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4223
4224         HWRM_CHECK_RESULT();
4225         HWRM_UNLOCK();
4226
4227         return rc;
4228 }
4229
4230 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
4231 {
4232         struct hwrm_port_clr_stats_input req = {0};
4233         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
4234         struct bnxt_pf_info *pf = bp->pf;
4235         int rc;
4236
4237         /* Not allowed on NS2 device, NPAR, MultiHost, VF */
4238         if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
4239             BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
4240                 return 0;
4241
4242         HWRM_PREP(&req, HWRM_PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
4243
4244         req.port_id = rte_cpu_to_le_16(pf->port_id);
4245         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4246
4247         HWRM_CHECK_RESULT();
4248         HWRM_UNLOCK();
4249
4250         return rc;
4251 }
4252
4253 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
4254 {
4255         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4256         struct hwrm_port_led_qcaps_input req = {0};
4257         int rc;
4258
4259         if (BNXT_VF(bp))
4260                 return 0;
4261
4262         HWRM_PREP(&req, HWRM_PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
4263         req.port_id = bp->pf->port_id;
4264         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4265
4266         HWRM_CHECK_RESULT();
4267
4268         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
4269                 unsigned int i;
4270
4271                 bp->leds->num_leds = resp->num_leds;
4272                 memcpy(bp->leds, &resp->led0_id,
4273                         sizeof(bp->leds[0]) * bp->leds->num_leds);
4274                 for (i = 0; i < bp->leds->num_leds; i++) {
4275                         struct bnxt_led_info *led = &bp->leds[i];
4276
4277                         uint16_t caps = led->led_state_caps;
4278
4279                         if (!led->led_group_id ||
4280                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
4281                                 bp->leds->num_leds = 0;
4282                                 break;
4283                         }
4284                 }
4285         }
4286
4287         HWRM_UNLOCK();
4288
4289         return rc;
4290 }
4291
4292 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
4293 {
4294         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4295         struct hwrm_port_led_cfg_input req = {0};
4296         struct bnxt_led_cfg *led_cfg;
4297         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
4298         uint16_t duration = 0;
4299         int rc, i;
4300
4301         if (!bp->leds->num_leds || BNXT_VF(bp))
4302                 return -EOPNOTSUPP;
4303
4304         HWRM_PREP(&req, HWRM_PORT_LED_CFG, BNXT_USE_CHIMP_MB);
4305
4306         if (led_on) {
4307                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
4308                 duration = rte_cpu_to_le_16(500);
4309         }
4310         req.port_id = bp->pf->port_id;
4311         req.num_leds = bp->leds->num_leds;
4312         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
4313         for (i = 0; i < bp->leds->num_leds; i++, led_cfg++) {
4314                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
4315                 led_cfg->led_id = bp->leds[i].led_id;
4316                 led_cfg->led_state = led_state;
4317                 led_cfg->led_blink_on = duration;
4318                 led_cfg->led_blink_off = duration;
4319                 led_cfg->led_group_id = bp->leds[i].led_group_id;
4320         }
4321
4322         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4323
4324         HWRM_CHECK_RESULT();
4325         HWRM_UNLOCK();
4326
4327         return rc;
4328 }
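
/*
 * Sketch: blinking the port LEDs for identification, assuming the LED
 * capabilities were fetched earlier. Passing true starts the alternate
 * blink pattern; passing false restores the default LED state:
 *
 *	rc = bnxt_hwrm_port_led_qcaps(bp);
 *	if (!rc && bp->leds->num_leds)
 *		rc = bnxt_hwrm_port_led_cfg(bp, true);
 *	...
 *	rc = bnxt_hwrm_port_led_cfg(bp, false);
 */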
4329
4330 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
4331                                uint32_t *length)
4332 {
4333         int rc;
4334         struct hwrm_nvm_get_dir_info_input req = {0};
4335         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
4336
4337         HWRM_PREP(&req, HWRM_NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
4338
4339         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4340
4341         HWRM_CHECK_RESULT();
4342
4343         *entries = rte_le_to_cpu_32(resp->entries);
4344         *length = rte_le_to_cpu_32(resp->entry_length);
4345
4346         HWRM_UNLOCK();
4347         return rc;
4348 }
4349
4350 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
4351 {
4352         int rc;
4353         uint32_t dir_entries;
4354         uint32_t entry_length;
4355         uint8_t *buf;
4356         size_t buflen;
4357         rte_iova_t dma_handle;
4358         struct hwrm_nvm_get_dir_entries_input req = {0};
4359         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
4360
4361         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
4362         if (rc != 0)
4363                 return rc;
4364
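	/* The first two output bytes carry the directory entry count and the
	 * per-entry length, each truncated to 8 bits.
	 */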
4365         *data++ = dir_entries;
4366         *data++ = entry_length;
4367         len -= 2;
4368         memset(data, 0xff, len);
4369
4370         buflen = dir_entries * entry_length;
4371         buf = rte_malloc("nvm_dir", buflen, 0);
4372         if (buf == NULL)
4373                 return -ENOMEM;
4374         dma_handle = rte_malloc_virt2iova(buf);
4375         if (dma_handle == RTE_BAD_IOVA) {
4376                 rte_free(buf);
4377                 PMD_DRV_LOG(ERR,
4378                         "unable to map response address to physical memory\n");
4379                 return -ENOMEM;
4380         }
4381         HWRM_PREP(&req, HWRM_NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
4382         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4383         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4384
4385         if (rc == 0)
4386                 memcpy(data, buf, len > buflen ? buflen : len);
4387
4388         rte_free(buf);
4389         HWRM_CHECK_RESULT();
4390         HWRM_UNLOCK();
4391
4392         return rc;
4393 }
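
/*
 * Sketch: sizing the caller's buffer. The first two bytes returned by
 * bnxt_get_nvram_directory() hold the entry count and entry length, so
 * the buffer needs two bytes beyond the directory itself ("nvm_dir_user"
 * is an illustrative zone name):
 *
 *	uint32_t entries, entry_len;
 *
 *	rc = bnxt_hwrm_nvm_get_dir_info(bp, &entries, &entry_len);
 *	if (rc == 0) {
 *		uint32_t buflen = entries * entry_len + 2;
 *		uint8_t *buf = rte_malloc("nvm_dir_user", buflen, 0);
 *
 *		if (buf != NULL)
 *			rc = bnxt_get_nvram_directory(bp, buflen, buf);
 *	}
 */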
4394
4395 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
4396                              uint32_t offset, uint32_t length,
4397                              uint8_t *data)
4398 {
4399         int rc;
4400         uint8_t *buf;
4401         rte_iova_t dma_handle;
4402         struct hwrm_nvm_read_input req = {0};
4403         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
4404
4405         buf = rte_malloc("nvm_item", length, 0);
4406         if (!buf)
4407                 return -ENOMEM;
4408
4409         dma_handle = rte_malloc_virt2iova(buf);
4410         if (dma_handle == RTE_BAD_IOVA) {
4411                 rte_free(buf);
4412                 PMD_DRV_LOG(ERR,
4413                         "unable to map response address to physical memory\n");
4414                 return -ENOMEM;
4415         }
4416         HWRM_PREP(&req, HWRM_NVM_READ, BNXT_USE_CHIMP_MB);
4417         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4418         req.dir_idx = rte_cpu_to_le_16(index);
4419         req.offset = rte_cpu_to_le_32(offset);
4420         req.len = rte_cpu_to_le_32(length);
4421         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4422         if (rc == 0)
4423                 memcpy(data, buf, length);
4424
4425         rte_free(buf);
4426         HWRM_CHECK_RESULT();
4427         HWRM_UNLOCK();
4428
4429         return rc;
4430 }
4431
4432 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
4433 {
4434         int rc;
4435         struct hwrm_nvm_erase_dir_entry_input req = {0};
4436         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
4437
4438         HWRM_PREP(&req, HWRM_NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
4439         req.dir_idx = rte_cpu_to_le_16(index);
4440         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4441         HWRM_CHECK_RESULT();
4442         HWRM_UNLOCK();
4443
4444         return rc;
4445 }
4446
4448 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
4449                           uint16_t dir_ordinal, uint16_t dir_ext,
4450                           uint16_t dir_attr, const uint8_t *data,
4451                           size_t data_len)
4452 {
4453         int rc;
4454         struct hwrm_nvm_write_input req = {0};
4455         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
4456         rte_iova_t dma_handle;
4457         uint8_t *buf;
4458
4459         buf = rte_malloc("nvm_write", data_len, 0);
4460         if (!buf)
4461                 return -ENOMEM;
4462
4463         dma_handle = rte_malloc_virt2iova(buf);
4464         if (dma_handle == RTE_BAD_IOVA) {
4465                 rte_free(buf);
4466                 PMD_DRV_LOG(ERR,
4467                         "unable to map response address to physical memory\n");
4468                 return -ENOMEM;
4469         }
4470         memcpy(buf, data, data_len);
4471
4472         HWRM_PREP(&req, HWRM_NVM_WRITE, BNXT_USE_CHIMP_MB);
4473
4474         req.dir_type = rte_cpu_to_le_16(dir_type);
4475         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
4476         req.dir_ext = rte_cpu_to_le_16(dir_ext);
4477         req.dir_attr = rte_cpu_to_le_16(dir_attr);
4478         req.dir_data_length = rte_cpu_to_le_32(data_len);
4479         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
4480
4481         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4482
4483         rte_free(buf);
4484         HWRM_CHECK_RESULT();
4485         HWRM_UNLOCK();
4486
4487         return rc;
4488 }
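
/*
 * Sketch: replacing a directory entry in place (idx and the dir_*
 * values are illustrative placeholders):
 *
 *	rc = bnxt_hwrm_erase_nvram_directory(bp, idx);
 *	if (rc == 0)
 *		rc = bnxt_hwrm_flash_nvram(bp, dir_type, dir_ordinal,
 *					   dir_ext, dir_attr, data, data_len);
 */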
4489
4490 static void
4491 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
4492 {
4493         uint32_t *count = cbdata;
4494
4495         *count = *count + 1;
4496 }
4497
4498 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
4499                                      struct bnxt_vnic_info *vnic __rte_unused)
4500 {
4501         return 0;
4502 }
4503
4504 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
4505 {
4506         uint32_t count = 0;
4507
4508         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
4509             &count, bnxt_vnic_count_hwrm_stub);
4510
4511         return count;
4512 }
4513
4514 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
4515                                         uint16_t *vnic_ids)
4516 {
4517         struct hwrm_func_vf_vnic_ids_query_input req = {0};
4518         struct hwrm_func_vf_vnic_ids_query_output *resp =
4519                                                 bp->hwrm_cmd_resp_addr;
4520         int rc;
4521
4522         /* First query all VNIC ids */
4523         HWRM_PREP(&req, HWRM_FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
4524
4525         req.vf_id = rte_cpu_to_le_16(bp->pf->first_vf_id + vf);
4526         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf->total_vnics);
4527         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));
4528
4529         if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
4530                 HWRM_UNLOCK();
4531                 PMD_DRV_LOG(ERR,
4532                 "unable to map VNIC ID table address to physical memory\n");
4533                 return -ENOMEM;
4534         }
4535         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4536         HWRM_CHECK_RESULT();
4537         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
4538
4539         HWRM_UNLOCK();
4540
4541         return rc;
4542 }
4543
4544 /*
 * This function queries the VNIC IDs for a specified VF. It then calls
4546  * the vnic_cb to update the necessary field in vnic_info with cbdata.
4547  * Then it calls the hwrm_cb function to program this new vnic configuration.
4548  */
4549 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
4550         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
4551         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
4552 {
4553         struct bnxt_vnic_info vnic;
4554         int rc = 0;
4555         int i, num_vnic_ids;
4556         uint16_t *vnic_ids;
4557         size_t vnic_id_sz;
4558         size_t sz;
4559
4560         /* First query all VNIC ids */
4561         vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
4562         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4563                         RTE_CACHE_LINE_SIZE);
4564         if (vnic_ids == NULL)
4565                 return -ENOMEM;
4566
4567         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4568                 rte_mem_lock_page(((char *)vnic_ids) + sz);
4569
4570         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4571
	if (num_vnic_ids < 0) {
		rte_free(vnic_ids);
		return num_vnic_ids;
	}
4574
	/*
	 * Retrieve each VNIC, let vnic_cb update it (e.g. bd_stall),
	 * then program the updated configuration via hwrm_cb.
	 */
4576
4577         for (i = 0; i < num_vnic_ids; i++) {
4578                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4579                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4580                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf->first_vf_id + vf);
4581                 if (rc)
4582                         break;
4583                 if (vnic.mru <= 4)      /* Indicates unallocated */
4584                         continue;
4585
4586                 vnic_cb(&vnic, cbdata);
4587
4588                 rc = hwrm_cb(bp, &vnic);
4589                 if (rc)
4590                         break;
4591         }
4592
4593         rte_free(vnic_ids);
4594
4595         return rc;
4596 }
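
/*
 * Sketch of a vnic_cb/hwrm_cb pair (the helper is hypothetical; it
 * assumes the bd_stall flag mentioned above lives in
 * struct bnxt_vnic_info):
 *
 *	static void example_set_bd_stall(struct bnxt_vnic_info *vnic,
 *					 void *cbdata)
 *	{
 *		vnic->bd_stall = *(bool *)cbdata;
 *	}
 *
 *	bool stall = true;
 *
 *	rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
 *			example_set_bd_stall, &stall, bnxt_hwrm_vnic_cfg);
 */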
4597
4598 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
4599                                               bool on)
4600 {
4601         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4602         struct hwrm_func_cfg_input req = {0};
4603         int rc;
4604
4605         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4606
4607         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4608         req.enables |= rte_cpu_to_le_32(
4609                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
4610         req.vlan_antispoof_mode = on ?
4611                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
4612                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
4613         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4614
4615         HWRM_CHECK_RESULT();
4616         HWRM_UNLOCK();
4617
4618         return rc;
4619 }
4620
4621 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
4622 {
4623         struct bnxt_vnic_info vnic;
4624         uint16_t *vnic_ids;
4625         size_t vnic_id_sz;
4626         int num_vnic_ids, i;
4627         size_t sz;
4628         int rc;
4629
4630         vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
4631         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4632                         RTE_CACHE_LINE_SIZE);
4633         if (vnic_ids == NULL)
4634                 return -ENOMEM;
4635
4636         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4637                 rte_mem_lock_page(((char *)vnic_ids) + sz);
4638
4639         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4640         if (rc <= 0)
4641                 goto exit;
4642         num_vnic_ids = rc;
4643
4644         /*
4645          * Loop through to find the default VNIC ID.
4646          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
4647          * by sending the hwrm_func_qcfg command to the firmware.
4648          */
4649         for (i = 0; i < num_vnic_ids; i++) {
4650                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4651                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4652                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
4653                                         bp->pf->first_vf_id + vf);
4654                 if (rc)
4655                         goto exit;
4656                 if (vnic.func_default) {
4657                         rte_free(vnic_ids);
4658                         return vnic.fw_vnic_id;
4659                 }
4660         }
4661         /* Could not find a default VNIC. */
4662         PMD_DRV_LOG(ERR, "No default VNIC\n");
4663 exit:
4664         rte_free(vnic_ids);
4665         return rc;
4666 }
4667
4668 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
4669                          uint16_t dst_id,
4670                          struct bnxt_filter_info *filter)
4671 {
4672         int rc = 0;
4673         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
4674         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4675         uint32_t enables = 0;
4676
4677         if (filter->fw_em_filter_id != UINT64_MAX)
4678                 bnxt_hwrm_clear_em_filter(bp, filter);
4679
4680         HWRM_PREP(&req, HWRM_CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
4681
4682         req.flags = rte_cpu_to_le_32(filter->flags);
4683
4684         enables = filter->enables |
4685               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
4686         req.dst_id = rte_cpu_to_le_16(dst_id);
4687
4688         if (filter->ip_addr_type) {
4689                 req.ip_addr_type = filter->ip_addr_type;
4690                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4691         }
4692         if (enables &
4693             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4694                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4695         if (enables &
4696             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4697                 memcpy(req.src_macaddr, filter->src_macaddr,
4698                        RTE_ETHER_ADDR_LEN);
4699         if (enables &
4700             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
4701                 memcpy(req.dst_macaddr, filter->dst_macaddr,
4702                        RTE_ETHER_ADDR_LEN);
4703         if (enables &
4704             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
4705                 req.ovlan_vid = filter->l2_ovlan;
4706         if (enables &
4707             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
4708                 req.ivlan_vid = filter->l2_ivlan;
4709         if (enables &
4710             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
4711                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4712         if (enables &
4713             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4714                 req.ip_protocol = filter->ip_protocol;
4715         if (enables &
4716             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4717                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
4718         if (enables &
4719             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
4720                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
4721         if (enables &
4722             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
4723                 req.src_port = rte_cpu_to_be_16(filter->src_port);
4724         if (enables &
4725             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
4726                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
4727         if (enables &
4728             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4729                 req.mirror_vnic_id = filter->mirror_vnic_id;
4730
4731         req.enables = rte_cpu_to_le_32(enables);
4732
4733         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4734
4735         HWRM_CHECK_RESULT();
4736
4737         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
4738         HWRM_UNLOCK();
4739
4740         return rc;
4741 }
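
/*
 * Sketch: a minimal exact-match filter keyed on destination MAC and
 * ethertype (filter is assumed to come from bnxt_alloc_filter(); fields
 * not shown keep their defaults):
 *
 *	filter->enables = HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR |
 *			  HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE;
 *	memcpy(filter->dst_macaddr, mac, RTE_ETHER_ADDR_LEN);
 *	filter->ethertype = RTE_ETHER_TYPE_IPV4;
 *	rc = bnxt_hwrm_set_em_filter(bp, vnic->fw_vnic_id, filter);
 */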
4742
4743 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
4744 {
4745         int rc = 0;
4746         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
4747         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
4748
4749         if (filter->fw_em_filter_id == UINT64_MAX)
4750                 return 0;
4751
4752         HWRM_PREP(&req, HWRM_CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
4753
4754         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
4755
4756         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4757
4758         HWRM_CHECK_RESULT();
4759         HWRM_UNLOCK();
4760
4761         filter->fw_em_filter_id = UINT64_MAX;
4762         filter->fw_l2_filter_id = UINT64_MAX;
4763
4764         return 0;
4765 }
4766
4767 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
4768                          uint16_t dst_id,
4769                          struct bnxt_filter_info *filter)
4770 {
4771         int rc = 0;
4772         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
4773         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
4774                                                 bp->hwrm_cmd_resp_addr;
4775         uint32_t enables = 0;
4776
4777         if (filter->fw_ntuple_filter_id != UINT64_MAX)
4778                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
4779
4780         HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
4781
4782         req.flags = rte_cpu_to_le_32(filter->flags);
4783
4784         enables = filter->enables |
4785               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
4786         req.dst_id = rte_cpu_to_le_16(dst_id);
4787
4788         if (filter->ip_addr_type) {
4789                 req.ip_addr_type = filter->ip_addr_type;
4790                 enables |=
4791                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4792         }
4793         if (enables &
4794             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4795                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4796         if (enables &
4797             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4798                 memcpy(req.src_macaddr, filter->src_macaddr,
4799                        RTE_ETHER_ADDR_LEN);
4800         if (enables &
4801             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
4802                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4803         if (enables &
4804             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4805                 req.ip_protocol = filter->ip_protocol;
4806         if (enables &
4807             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4808                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
4809         if (enables &
4810             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
4811                 req.src_ipaddr_mask[0] =
4812                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
4813         if (enables &
4814             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
4815                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
4816         if (enables &
4817             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
4818                 req.dst_ipaddr_mask[0] =
			rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
4820         if (enables &
4821             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
4822                 req.src_port = rte_cpu_to_le_16(filter->src_port);
4823         if (enables &
4824             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
4825                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
4826         if (enables &
4827             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
4828                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
4829         if (enables &
4830             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
4831                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
4832         if (enables &
4833             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4834                 req.mirror_vnic_id = filter->mirror_vnic_id;
4835
4836         req.enables = rte_cpu_to_le_32(enables);
4837
4838         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4839
4840         HWRM_CHECK_RESULT();
4841
4842         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
4843         filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
4844         HWRM_UNLOCK();
4845
4846         return rc;
4847 }
4848
4849 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
4850                                 struct bnxt_filter_info *filter)
4851 {
4852         int rc = 0;
4853         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
4854         struct hwrm_cfa_ntuple_filter_free_output *resp =
4855                                                 bp->hwrm_cmd_resp_addr;
4856
4857         if (filter->fw_ntuple_filter_id == UINT64_MAX)
4858                 return 0;
4859
4860         HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
4861
4862         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
4863
4864         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4865
4866         HWRM_CHECK_RESULT();
4867         HWRM_UNLOCK();
4868
4869         filter->fw_ntuple_filter_id = UINT64_MAX;
4870
4871         return 0;
4872 }
4873
4874 static int
4875 bnxt_vnic_rss_configure_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4876 {
4877         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4878         uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
4879         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
4880         struct bnxt_rx_queue **rxqs = bp->rx_queues;
4881         uint16_t *ring_tbl = vnic->rss_table;
4882         int nr_ctxs = vnic->num_lb_ctxts;
4883         int max_rings = bp->rx_nr_rings;
4884         int i, j, k, cnt;
4885         int rc = 0;
4886
4887         for (i = 0, k = 0; i < nr_ctxs; i++) {
4888                 struct bnxt_rx_ring_info *rxr;
4889                 struct bnxt_cp_ring_info *cpr;
4890
4891                 HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
4892
4893                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
4894                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
4895                 req.hash_mode_flags = vnic->hash_mode;
4896
4897                 req.ring_grp_tbl_addr =
4898                     rte_cpu_to_le_64(vnic->rss_table_dma_addr +
4899                                      i * BNXT_RSS_ENTRIES_PER_CTX_P5 *
4900                                      2 * sizeof(*ring_tbl));
4901                 req.hash_key_tbl_addr =
4902                     rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
4903
4904                 req.ring_table_pair_index = i;
4905                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
4906
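		/*
		 * Each P5 RSS context holds BNXT_RSS_ENTRIES_PER_CTX_P5
		 * table entries, and every entry is a (rx ring, completion
		 * ring) firmware ID pair filled by the loop below; that
		 * pairing is why the table stride above is
		 * 2 * sizeof(*ring_tbl) per context.
		 */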
4907                 for (j = 0; j < 64; j++) {
4908                         uint16_t ring_id;
4909
4910                         /* Find next active ring. */
4911                         for (cnt = 0; cnt < max_rings; cnt++) {
4912                                 if (rx_queue_state[k] !=
4913                                                 RTE_ETH_QUEUE_STATE_STOPPED)
4914                                         break;
4915                                 if (++k == max_rings)
4916                                         k = 0;
4917                         }
4918
4919                         /* Return if no rings are active. */
4920                         if (cnt == max_rings) {
4921                                 HWRM_UNLOCK();
4922                                 return 0;
4923                         }
4924
4925                         /* Add rx/cp ring pair to RSS table. */
4926                         rxr = rxqs[k]->rx_ring;
4927                         cpr = rxqs[k]->cp_ring;
4928
4929                         ring_id = rxr->rx_ring_struct->fw_ring_id;
4930                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4931                         ring_id = cpr->cp_ring_struct->fw_ring_id;
4932                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4933
4934                         if (++k == max_rings)
4935                                 k = 0;
4936                 }
4937                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
4938                                             BNXT_USE_CHIMP_MB);
4939
4940                 HWRM_CHECK_RESULT();
4941                 HWRM_UNLOCK();
4942         }
4943
4944         return rc;
4945 }
4946
4947 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4948 {
4949         unsigned int rss_idx, fw_idx, i;
4950
4951         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4952                 return 0;
4953
4954         if (!(vnic->rss_table && vnic->hash_type))
4955                 return 0;
4956
4957         if (BNXT_CHIP_P5(bp))
4958                 return bnxt_vnic_rss_configure_p5(bp, vnic);
4959
4960         /*
4961          * Fill the RSS hash & redirection table with
4962          * ring group ids for all VNICs
4963          */
4964         for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
4965              rss_idx++, fw_idx++) {
4966                 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
4967                         fw_idx %= bp->rx_cp_nr_rings;
4968                         if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
4969                                 break;
4970                         fw_idx++;
4971                 }
4972
4973                 if (i == bp->rx_cp_nr_rings)
4974                         return 0;
4975
4976                 vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
4977         }
4978
4979         return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
4980 }
4981
4982 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
4983         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
4984 {
4985         uint16_t flags;
4986
4987         req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
4988
	/* This is a 6-bit value and must not be 0, or we'll get a non-stop IRQ */
4990         req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
4991
	/* This is a 6-bit value and must not be 0, or we'll get a non-stop IRQ */
4993         req->num_cmpl_dma_aggr_during_int =
4994                 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
4995
4996         req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
4997
4998         /* min timer set to 1/2 of interrupt timer */
4999         req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
5000
5001         /* buf timer set to 1/4 of interrupt timer */
5002         req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
5003
5004         req->cmpl_aggr_dma_tmr_during_int =
5005                 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
5006
5007         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
5008                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
5009         req->flags = rte_cpu_to_le_16(flags);
5010 }
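
/*
 * Sketch of a bnxt_coal profile consistent with the ratios noted above
 * (illustrative numbers, not driver defaults), handed to
 * bnxt_hwrm_set_ring_coal() below:
 *
 *	struct bnxt_coal coal = {
 *		.num_cmpl_aggr_int = 1,
 *		.num_cmpl_dma_aggr = 1,
 *		.num_cmpl_dma_aggr_during_int = 1,
 *		.int_lat_tmr_max = 8,
 *		.int_lat_tmr_min = 4,
 *		.cmpl_aggr_dma_tmr = 2,
 *		.cmpl_aggr_dma_tmr_during_int = 2,
 *	};
 *
 *	rc = bnxt_hwrm_set_ring_coal(bp, &coal, ring_id);
 */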
5011
5012 static int bnxt_hwrm_set_coal_params_p5(struct bnxt *bp,
5013                 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
5014 {
5015         struct hwrm_ring_aggint_qcaps_input req = {0};
5016         struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5017         uint32_t enables;
5018         uint16_t flags;
5019         int rc;
5020
5021         HWRM_PREP(&req, HWRM_RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
5022         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5023         HWRM_CHECK_RESULT();
5024
5025         agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max;
5026         agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min;
5027
5028         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
5029                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
5030         agg_req->flags = rte_cpu_to_le_16(flags);
5031         enables =
5032          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR |
5033          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR;
5034         agg_req->enables = rte_cpu_to_le_32(enables);
5035
5036         HWRM_UNLOCK();
5037         return rc;
5038 }
5039
5040 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
5041                         struct bnxt_coal *coal, uint16_t ring_id)
5042 {
5043         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
5044         struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
5045                                                 bp->hwrm_cmd_resp_addr;
5046         int rc;
5047
	/* Set ring coalesce parameters only for P5 chips and Stratus devices */
5049         if (BNXT_CHIP_P5(bp)) {
5050                 if (bnxt_hwrm_set_coal_params_p5(bp, &req))
5051                         return -1;
5052         } else if (bnxt_stratus_device(bp)) {
5053                 bnxt_hwrm_set_coal_params(coal, &req);
5054         } else {
5055                 return 0;
5056         }
5057
5058         HWRM_PREP(&req,
5059                   HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
5060                   BNXT_USE_CHIMP_MB);
5061         req.ring_id = rte_cpu_to_le_16(ring_id);
5062         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5063         HWRM_CHECK_RESULT();
5064         HWRM_UNLOCK();
5065         return 0;
5066 }
5067
5068 #define BNXT_RTE_MEMZONE_FLAG  (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
5069 int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
5070 {
5071         struct hwrm_func_backing_store_qcaps_input req = {0};
5072         struct hwrm_func_backing_store_qcaps_output *resp =
5073                 bp->hwrm_cmd_resp_addr;
5074         struct bnxt_ctx_pg_info *ctx_pg;
5075         struct bnxt_ctx_mem_info *ctx;
5076         int total_alloc_len;
5077         int rc, i, tqm_rings;
5078
5079         if (!BNXT_CHIP_P5(bp) ||
5080             bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
5081             BNXT_VF(bp) ||
5082             bp->ctx)
5083                 return 0;
5084
5085         HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
5086         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5087         HWRM_CHECK_RESULT_SILENT();
5088
5089         total_alloc_len = sizeof(*ctx);
5090         ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len,
5091                           RTE_CACHE_LINE_SIZE);
5092         if (!ctx) {
5093                 rc = -ENOMEM;
5094                 goto ctx_err;
5095         }
5096
5097         ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
5098         ctx->qp_min_qp1_entries =
5099                 rte_le_to_cpu_16(resp->qp_min_qp1_entries);
5100         ctx->qp_max_l2_entries =
5101                 rte_le_to_cpu_16(resp->qp_max_l2_entries);
5102         ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
5103         ctx->srq_max_l2_entries =
5104                 rte_le_to_cpu_16(resp->srq_max_l2_entries);
5105         ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
5106         ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
5107         ctx->cq_max_l2_entries =
5108                 rte_le_to_cpu_16(resp->cq_max_l2_entries);
5109         ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
5110         ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
5111         ctx->vnic_max_vnic_entries =
5112                 rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
5113         ctx->vnic_max_ring_table_entries =
5114                 rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
5115         ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
5116         ctx->stat_max_entries =
5117                 rte_le_to_cpu_32(resp->stat_max_entries);
5118         ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
5119         ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
5120         ctx->tqm_min_entries_per_ring =
5121                 rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
5122         ctx->tqm_max_entries_per_ring =
5123                 rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
5124         ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
5125         if (!ctx->tqm_entries_multiple)
5126                 ctx->tqm_entries_multiple = 1;
5127         ctx->mrav_max_entries =
5128                 rte_le_to_cpu_32(resp->mrav_max_entries);
5129         ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
5130         ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
5131         ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
5132         ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
5133
5134         if (!ctx->tqm_fp_rings_count)
5135                 ctx->tqm_fp_rings_count = bp->max_q;
5136
5137         tqm_rings = ctx->tqm_fp_rings_count + 1;
5138
5139         ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
5140                             sizeof(*ctx_pg) * tqm_rings,
5141                             RTE_CACHE_LINE_SIZE);
5142         if (!ctx_pg) {
5143                 rc = -ENOMEM;
5144                 goto ctx_err;
5145         }
5146         for (i = 0; i < tqm_rings; i++, ctx_pg++)
5147                 ctx->tqm_mem[i] = ctx_pg;
5148
5149         bp->ctx = ctx;
5150 ctx_err:
5151         HWRM_UNLOCK();
5152         return rc;
5153 }
5154
5155 int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
5156 {
5157         struct hwrm_func_backing_store_cfg_input req = {0};
5158         struct hwrm_func_backing_store_cfg_output *resp =
5159                 bp->hwrm_cmd_resp_addr;
5160         struct bnxt_ctx_mem_info *ctx = bp->ctx;
5161         struct bnxt_ctx_pg_info *ctx_pg;
5162         uint32_t *num_entries;
5163         uint64_t *pg_dir;
5164         uint8_t *pg_attr;
5165         uint32_t ena;
5166         int i, rc;
5167
5168         if (!ctx)
5169                 return 0;
5170
5171         HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
5172         req.enables = rte_cpu_to_le_32(enables);
5173
5174         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
5175                 ctx_pg = &ctx->qp_mem;
5176                 req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5177                 req.qp_num_qp1_entries =
5178                         rte_cpu_to_le_16(ctx->qp_min_qp1_entries);
5179                 req.qp_num_l2_entries =
5180                         rte_cpu_to_le_16(ctx->qp_max_l2_entries);
5181                 req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);
5182                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5183                                       &req.qpc_pg_size_qpc_lvl,
5184                                       &req.qpc_page_dir);
5185         }
5186
5187         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
5188                 ctx_pg = &ctx->srq_mem;
5189                 req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5190                 req.srq_num_l2_entries =
5191                                  rte_cpu_to_le_16(ctx->srq_max_l2_entries);
5192                 req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);
5193                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5194                                       &req.srq_pg_size_srq_lvl,
5195                                       &req.srq_page_dir);
5196         }
5197
5198         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
5199                 ctx_pg = &ctx->cq_mem;
5200                 req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5201                 req.cq_num_l2_entries =
5202                                 rte_cpu_to_le_16(ctx->cq_max_l2_entries);
5203                 req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);
5204                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5205                                       &req.cq_pg_size_cq_lvl,
5206                                       &req.cq_page_dir);
5207         }
5208
5209         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
5210                 ctx_pg = &ctx->vnic_mem;
5211                 req.vnic_num_vnic_entries =
5212                         rte_cpu_to_le_16(ctx->vnic_max_vnic_entries);
5213                 req.vnic_num_ring_table_entries =
5214                         rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);
5215                 req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);
5216                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5217                                       &req.vnic_pg_size_vnic_lvl,
5218                                       &req.vnic_page_dir);
5219         }
5220
5221         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
5222                 ctx_pg = &ctx->stat_mem;
5223                 req.stat_num_entries = rte_cpu_to_le_16(ctx->stat_max_entries);
5224                 req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);
5225                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5226                                       &req.stat_pg_size_stat_lvl,
5227                                       &req.stat_page_dir);
5228         }
5229
5230         req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
5231         num_entries = &req.tqm_sp_num_entries;
5232         pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;
5233         pg_dir = &req.tqm_sp_page_dir;
5234         ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
5235         for (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
5236                 if (!(enables & ena))
5237                         continue;
5238
		ctx_pg = ctx->tqm_mem[i];
		*num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5243                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
5244         }
5245
5246         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5247         HWRM_CHECK_RESULT();
5248         HWRM_UNLOCK();
5249
5250         return rc;
5251 }
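
/*
 * Sketch of the expected init-time sequence (enables is built from the
 * backing-store regions the driver actually allocated):
 *
 *	rc = bnxt_hwrm_func_backing_store_qcaps(bp);
 *	if (rc == 0 && bp->ctx) {
 *		... allocate bp->ctx backing pages ...
 *		rc = bnxt_hwrm_func_backing_store_cfg(bp, enables);
 *	}
 */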
5252
5253 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
5254 {
5255         struct hwrm_port_qstats_ext_input req = {0};
5256         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
5257         struct bnxt_pf_info *pf = bp->pf;
5258         int rc;
5259
5260         if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
5261               bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
5262                 return 0;
5263
5264         HWRM_PREP(&req, HWRM_PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
5265
5266         req.port_id = rte_cpu_to_le_16(pf->port_id);
5267         if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
5268                 req.tx_stat_host_addr =
5269                         rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map);
5270                 req.tx_stat_size =
5271                         rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
5272         }
5273         if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
5274                 req.rx_stat_host_addr =
5275                         rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map);
5276                 req.rx_stat_size =
5277                         rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
5278         }
5279         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5280
5281         if (rc) {
5282                 bp->fw_rx_port_stats_ext_size = 0;
5283                 bp->fw_tx_port_stats_ext_size = 0;
5284         } else {
5285                 bp->fw_rx_port_stats_ext_size =
5286                         rte_le_to_cpu_16(resp->rx_stat_size);
5287                 bp->fw_tx_port_stats_ext_size =
5288                         rte_le_to_cpu_16(resp->tx_stat_size);
5289         }
5290
5291         HWRM_CHECK_RESULT();
5292         HWRM_UNLOCK();
5293
5294         return rc;
5295 }
5296
5297 int
5298 bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
5299 {
5300         struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0};
5301         struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp =
5302                 bp->hwrm_cmd_resp_addr;
5303         int rc = 0;
5304
5305         HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB);
5306         req.tunnel_type = type;
5307         req.dest_fid = bp->fw_fid;
5308         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5309         HWRM_CHECK_RESULT();
5310
5311         HWRM_UNLOCK();
5312
5313         return rc;
5314 }
5315
5316 int
5317 bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
5318 {
5319         struct hwrm_cfa_redirect_tunnel_type_free_input req = {0};
5320         struct hwrm_cfa_redirect_tunnel_type_free_output *resp =
5321                 bp->hwrm_cmd_resp_addr;
5322         int rc = 0;
5323
5324         HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB);
5325         req.tunnel_type = type;
5326         req.dest_fid = bp->fw_fid;
5327         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5328         HWRM_CHECK_RESULT();
5329
5330         HWRM_UNLOCK();
5331
5332         return rc;
5333 }
5334
5335 int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
5336 {
5337         struct hwrm_cfa_redirect_query_tunnel_type_input req = {0};
5338         struct hwrm_cfa_redirect_query_tunnel_type_output *resp =
5339                 bp->hwrm_cmd_resp_addr;
5340         int rc = 0;
5341
5342         HWRM_PREP(&req, HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB);
5343         req.src_fid = bp->fw_fid;
5344         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5345         HWRM_CHECK_RESULT();
5346
5347         if (type)
5348                 *type = rte_le_to_cpu_32(resp->tunnel_mask);
5349
5350         HWRM_UNLOCK();
5351
5352         return rc;
5353 }
5354
5355 int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
5356                                    uint16_t *dst_fid)
5357 {
5358         struct hwrm_cfa_redirect_tunnel_type_info_input req = {0};
5359         struct hwrm_cfa_redirect_tunnel_type_info_output *resp =
5360                 bp->hwrm_cmd_resp_addr;
5361         int rc = 0;
5362
5363         HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
5364         req.src_fid = bp->fw_fid;
5365         req.tunnel_type = tun_type;
5366         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5367         HWRM_CHECK_RESULT();
5368
5369         if (dst_fid)
5370                 *dst_fid = rte_le_to_cpu_16(resp->dest_fid);
5371
	PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", rte_le_to_cpu_16(resp->dest_fid));
5373
5374         HWRM_UNLOCK();
5375
5376         return rc;
5377 }
5378
5379 int bnxt_hwrm_set_mac(struct bnxt *bp)
5380 {
5381         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5382         struct hwrm_func_vf_cfg_input req = {0};
5383         int rc = 0;
5384
5385         if (!BNXT_VF(bp))
5386                 return 0;
5387
5388         HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
5389
5390         req.enables =
5391                 rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
5392         memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
5393
5394         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5395
5396         HWRM_CHECK_RESULT();
5397
5398         HWRM_UNLOCK();
5399
5400         return rc;
5401 }
5402
5403 int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
5404 {
5405         struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
5406         struct hwrm_func_drv_if_change_input req = {0};
5407         uint32_t flags;
5408         int rc;
5409
5410         if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
5411                 return 0;
5412
	/* Do not issue FUNC_DRV_IF_CHANGE during reset recovery.
	 * If FUNC_DRV_IF_CHANGE is issued with the "up" flag cleared
	 * before FUNC_DRV_UNRGTR, the FW starts resetting before the
	 * unregister completes.
	 */
5417         if (!up && (bp->flags & BNXT_FLAG_FW_RESET))
5418                 return 0;
5419
5420         HWRM_PREP(&req, HWRM_FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);
5421
5422         if (up)
5423                 req.flags =
5424                 rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP);
5425
5426         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5427
5428         HWRM_CHECK_RESULT();
5429         flags = rte_le_to_cpu_32(resp->flags);
5430         HWRM_UNLOCK();
5431
5432         if (!up)
5433                 return 0;
5434
5435         if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
5436                 PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
5437                 bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
5438         }
5439
5440         return 0;
5441 }
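
/*
 * Sketch: the port start path is expected to bracket device bring-up
 * with this call (and mirror it with up == false on stop), re-probing
 * FW resources if a hot reset completed while the port was down:
 *
 *	rc = bnxt_hwrm_if_change(bp, true);
 *	if (rc == 0 && (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE))
 *		... re-initialize FW-owned resources ...
 */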
5442
5443 int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
5444 {
5445         struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5446         struct bnxt_error_recovery_info *info = bp->recovery_info;
5447         struct hwrm_error_recovery_qcfg_input req = {0};
5448         uint32_t flags = 0;
5449         unsigned int i;
5450         int rc;
5451
5452         /* Older FW does not have error recovery support */
5453         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5454                 return 0;
5455
5456         HWRM_PREP(&req, HWRM_ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
5457
5458         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5459
5460         HWRM_CHECK_RESULT();
5461
5462         flags = rte_le_to_cpu_32(resp->flags);
5463         if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST)
5464                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST;
5465         else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
5466                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU;
5467
5468         if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) &&
5469             !(bp->flags & BNXT_FLAG_KONG_MB_EN)) {
5470                 rc = -EINVAL;
5471                 goto err;
5472         }
5473
5474         /* FW returned values are in units of 100msec */
5475         info->driver_polling_freq =
5476                 rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
5477         info->master_func_wait_period =
5478                 rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
5479         info->normal_func_wait_period =
5480                 rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
5481         info->master_func_wait_period_after_reset =
5482                 rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100;
5483         info->max_bailout_time_after_reset =
5484                 rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
5485         info->status_regs[BNXT_FW_STATUS_REG] =
5486                 rte_le_to_cpu_32(resp->fw_health_status_reg);
5487         info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] =
5488                 rte_le_to_cpu_32(resp->fw_heartbeat_reg);
5489         info->status_regs[BNXT_FW_RECOVERY_CNT_REG] =
5490                 rte_le_to_cpu_32(resp->fw_reset_cnt_reg);
5491         info->status_regs[BNXT_FW_RESET_INPROG_REG] =
5492                 rte_le_to_cpu_32(resp->reset_inprogress_reg);
5493         info->reg_array_cnt =
5494                 rte_le_to_cpu_32(resp->reg_array_cnt);
5495
5496         if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) {
5497                 rc = -EINVAL;
5498                 goto err;
5499         }
5500
5501         for (i = 0; i < info->reg_array_cnt; i++) {
5502                 info->reset_reg[i] =
5503                         rte_le_to_cpu_32(resp->reset_reg[i]);
5504                 info->reset_reg_val[i] =
5505                         rte_le_to_cpu_32(resp->reset_reg_val[i]);
5506                 info->delay_after_reset[i] =
5507                         resp->delay_after_reset[i];
5508         }
5509 err:
5510         HWRM_UNLOCK();
5511
5512         /* Map the FW status registers */
5513         if (!rc)
5514                 rc = bnxt_map_fw_health_status_regs(bp);
5515
5516         if (rc) {
5517                 rte_free(bp->recovery_info);
5518                 bp->recovery_info = NULL;
5519         }
5520         return rc;
5521 }
5522
5523 int bnxt_hwrm_fw_reset(struct bnxt *bp)
5524 {
5525         struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr;
5526         struct hwrm_fw_reset_input req = {0};
5527         int rc;
5528
5529         if (!BNXT_PF(bp))
5530                 return -EOPNOTSUPP;
5531
5532         HWRM_PREP(&req, HWRM_FW_RESET, BNXT_USE_KONG(bp));
5533
5534         req.embedded_proc_type =
5535                 HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
5536         req.selfrst_status =
5537                 HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
5538         req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
5539
5540         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
5541                                     BNXT_USE_KONG(bp));
5542
5543         HWRM_CHECK_RESULT();
5544         HWRM_UNLOCK();
5545
5546         return rc;
5547 }
5548
5549 int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
5550 {
5551         struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
5552         struct hwrm_port_ts_query_input req = {0};
5553         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
5554         uint32_t flags = 0;
5555         int rc;
5556
5557         if (!ptp)
5558                 return 0;
5559
5560         HWRM_PREP(&req, HWRM_PORT_TS_QUERY, BNXT_USE_CHIMP_MB);
5561
5562         switch (path) {
5563         case BNXT_PTP_FLAGS_PATH_TX:
5564                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX;
5565                 break;
5566         case BNXT_PTP_FLAGS_PATH_RX:
5567                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX;
5568                 break;
5569         case BNXT_PTP_FLAGS_CURRENT_TIME:
5570                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME;
5571                 break;
5572         }
5573
5574         req.flags = rte_cpu_to_le_32(flags);
5575         req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
5576
5577         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5578
5579         HWRM_CHECK_RESULT();
5580
5581         if (timestamp) {
5582                 *timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]);
5583                 *timestamp |=
5584                         (uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32;
5585         }
5586         HWRM_UNLOCK();
5587
5588         return rc;
5589 }
5590
5591 int bnxt_hwrm_cfa_counter_qcaps(struct bnxt *bp, uint16_t *max_fc)
5592 {
5593         int rc = 0;
5594
5595         struct hwrm_cfa_counter_qcaps_input req = {0};
5596         struct hwrm_cfa_counter_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5597
5598         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5599                 PMD_DRV_LOG(DEBUG,
5600                             "Not a PF or trusted VF. Command not supported\n");
5601                 return 0;
5602         }
5603
5604         HWRM_PREP(&req, HWRM_CFA_COUNTER_QCAPS, BNXT_USE_KONG(bp));
5605         req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5606         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5607
5608         HWRM_CHECK_RESULT();
5609         if (max_fc)
5610                 *max_fc = rte_le_to_cpu_16(resp->max_rx_fc);
5611         HWRM_UNLOCK();
5612
5613         return 0;
5614 }
5615
5616 int bnxt_hwrm_ctx_rgtr(struct bnxt *bp, rte_iova_t dma_addr, uint16_t *ctx_id)
5617 {
5618         int rc = 0;
5619         struct hwrm_cfa_ctx_mem_rgtr_input req = {.req_type = 0 };
5620         struct hwrm_cfa_ctx_mem_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
5621
5622         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5623                 PMD_DRV_LOG(DEBUG,
5624                             "Not a PF or trusted VF. Command not supported\n");
5625                 return 0;
5626         }
5627
5628         HWRM_PREP(&req, HWRM_CFA_CTX_MEM_RGTR, BNXT_USE_KONG(bp));
5629
5630         req.page_level = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0;
5631         req.page_size = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_2M;
5632         req.page_dir = rte_cpu_to_le_64(dma_addr);
5633
5634         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5635
5636         HWRM_CHECK_RESULT();
5637         if (ctx_id) {
5638                 *ctx_id  = rte_le_to_cpu_16(resp->ctx_id);
5639                 PMD_DRV_LOG(DEBUG, "ctx_id = %d\n", *ctx_id);
5640         }
5641         HWRM_UNLOCK();
5642
5643         return 0;
5644 }
5645
5646 int bnxt_hwrm_ctx_unrgtr(struct bnxt *bp, uint16_t ctx_id)
5647 {
5648         int rc = 0;
5649         struct hwrm_cfa_ctx_mem_unrgtr_input req = {.req_type = 0 };
5650         struct hwrm_cfa_ctx_mem_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
5651
5652         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5653                 PMD_DRV_LOG(DEBUG,
5654                             "Not a PF or trusted VF. Command not supported\n");
5655                 return 0;
5656         }
5657
5658         HWRM_PREP(&req, HWRM_CFA_CTX_MEM_UNRGTR, BNXT_USE_KONG(bp));
5659
5660         req.ctx_id = rte_cpu_to_le_16(ctx_id);
5661
5662         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5663
5664         HWRM_CHECK_RESULT();
5665         HWRM_UNLOCK();
5666
5667         return rc;
5668 }
5669
5670 int bnxt_hwrm_cfa_counter_cfg(struct bnxt *bp, enum bnxt_flow_dir dir,
5671                               uint16_t cntr, uint16_t ctx_id,
5672                               uint32_t num_entries, bool enable)
5673 {
5674         struct hwrm_cfa_counter_cfg_input req = {0};
5675         struct hwrm_cfa_counter_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5676         uint16_t flags = 0;
5677         int rc;
5678
5679         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5680                 PMD_DRV_LOG(DEBUG,
5681                             "Not a PF or trusted VF. Command not supported\n");
5682                 return 0;
5683         }
5684
5685         HWRM_PREP(&req, HWRM_CFA_COUNTER_CFG, BNXT_USE_KONG(bp));
5686
5687         req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5688         req.counter_type = rte_cpu_to_le_16(cntr);
5689         flags = enable ? HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE :
5690                 HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_DISABLE;
5691         flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL;
5692         if (dir == BNXT_DIR_RX)
5693                 flags |=  HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX;
5694         else if (dir == BNXT_DIR_TX)
5695                 flags |=  HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_TX;
5696         req.flags = rte_cpu_to_le_16(flags);
5697         req.ctx_id =  rte_cpu_to_le_16(ctx_id);
5698         req.num_entries = rte_cpu_to_le_32(num_entries);
5699
5700         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5701         HWRM_CHECK_RESULT();
5702         HWRM_UNLOCK();
5703
5704         return 0;
5705 }
5706
5707 int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp,
5708                                  enum bnxt_flow_dir dir,
5709                                  uint16_t cntr,
5710                                  uint16_t num_entries)
5711 {
5712         struct hwrm_cfa_counter_qstats_output *resp = bp->hwrm_cmd_resp_addr;
5713         struct hwrm_cfa_counter_qstats_input req = {0};
5714         uint16_t flow_ctx_id = 0;
5715         uint16_t flags = 0;
5716         int rc = 0;
5717
5718         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5719                 PMD_DRV_LOG(DEBUG,
5720                             "Not a PF or trusted VF. Command not supported\n");
5721                 return 0;
5722         }
5723
5724         if (dir == BNXT_DIR_RX) {
5725                 flow_ctx_id = bp->flow_stat->rx_fc_in_tbl.ctx_id;
5726                 flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX;
5727         } else if (dir == BNXT_DIR_TX) {
5728                 flow_ctx_id = bp->flow_stat->tx_fc_in_tbl.ctx_id;
5729                 flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_TX;
5730         }
5731
5732         HWRM_PREP(&req, HWRM_CFA_COUNTER_QSTATS, BNXT_USE_KONG(bp));
5733         req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5734         req.counter_type = rte_cpu_to_le_16(cntr);
5735         req.input_flow_ctx_id = rte_cpu_to_le_16(flow_ctx_id);
5736         req.num_entries = rte_cpu_to_le_16(num_entries);
5737         req.flags = rte_cpu_to_le_16(flags);
5738         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5739
5740         HWRM_CHECK_RESULT();
5741         HWRM_UNLOCK();
5742
5743         return 0;
5744 }
5745
5746 int bnxt_hwrm_first_vf_id_query(struct bnxt *bp, uint16_t fid,
5747                                 uint16_t *first_vf_id)
5748 {
5749         int rc = 0;
5750         struct hwrm_func_qcaps_input req = {.req_type = 0 };
5751         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5752
5753         HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
5754
5755         req.fid = rte_cpu_to_le_16(fid);
5756
5757         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5758
5759         HWRM_CHECK_RESULT();
5760
5761         if (first_vf_id)
5762                 *first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
5763
5764         HWRM_UNLOCK();
5765
5766         return rc;
5767 }
5768
5769 int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, struct bnxt_representor *rep_bp)
5770 {
5771         struct hwrm_cfa_pair_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5772         struct hwrm_cfa_pair_alloc_input req = {0};
5773         int rc;
5774
5775         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5776                 PMD_DRV_LOG(DEBUG,
5777                             "Not a PF or trusted VF. Command not supported\n");
5778                 return 0;
5779         }
5780
5781         HWRM_PREP(&req, HWRM_CFA_PAIR_ALLOC, BNXT_USE_CHIMP_MB);
5782         req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
5783         snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
5784                  bp->eth_dev->data->name, rep_bp->vf_id);
5785
5786         req.pf_b_id = rep_bp->parent_pf_idx;
5787         req.vf_b_id = BNXT_REP_PF(rep_bp) ? rte_cpu_to_le_16(((uint16_t)-1)) :
5788                                                 rte_cpu_to_le_16(rep_bp->vf_id);
5789         req.vf_a_id = rte_cpu_to_le_16(bp->fw_fid);
5790         req.host_b_id = 1; /* TBD - Confirm if this is OK */
5791
5792         req.enables |= rep_bp->flags & BNXT_REP_Q_R2F_VALID ?
5793                         HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID : 0;
5794         req.enables |= rep_bp->flags & BNXT_REP_Q_F2R_VALID ?
5795                         HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_BA_VALID : 0;
5796         req.enables |= rep_bp->flags & BNXT_REP_FC_R2F_VALID ?
5797                         HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_AB_VALID : 0;
5798         req.enables |= rep_bp->flags & BNXT_REP_FC_F2R_VALID ?
5799                         HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_BA_VALID : 0;
5800
5801         req.q_ab = rep_bp->rep_q_r2f;
5802         req.q_ba = rep_bp->rep_q_f2r;
5803         req.fc_ab = rep_bp->rep_fc_r2f;
5804         req.fc_ba = rep_bp->rep_fc_f2r;
5805
5806         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5807         HWRM_CHECK_RESULT();
5808
5809         HWRM_UNLOCK();
5810         PMD_DRV_LOG(DEBUG, "%s %d allocated\n",
5811                     BNXT_REP_PF(rep_bp) ? "PFR" : "VFR", rep_bp->vf_id);
5812         return rc;
5813 }
5814
5815 int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, struct bnxt_representor *rep_bp)
5816 {
5817         struct hwrm_cfa_pair_free_output *resp = bp->hwrm_cmd_resp_addr;
5818         struct hwrm_cfa_pair_free_input req = {0};
5819         int rc;
5820
5821         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5822                 PMD_DRV_LOG(DEBUG,
5823                             "Not a PF or trusted VF. Command not supported\n");
5824                 return 0;
5825         }
5826
5827         HWRM_PREP(&req, HWRM_CFA_PAIR_FREE, BNXT_USE_CHIMP_MB);
5828         snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
5829                  bp->eth_dev->data->name, rep_bp->vf_id);
5830         req.pf_b_id = rep_bp->parent_pf_idx;
5831         req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
5832         req.vf_id = BNXT_REP_PF(rep_bp) ? rte_cpu_to_le_16(((uint16_t)-1)) :
5833                                                 rte_cpu_to_le_16(rep_bp->vf_id);
5834         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5835         HWRM_CHECK_RESULT();
5836         HWRM_UNLOCK();
5837         PMD_DRV_LOG(DEBUG, "%s %d freed\n", BNXT_REP_PF(rep_bp) ? "PFR" : "VFR",
5838                     rep_bp->vf_id);
5839         return rc;
5840 }
5841
5842 int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp)
5843 {
5844         struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp =
5845                                         bp->hwrm_cmd_resp_addr;
5846         struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
5847         uint32_t flags = 0;
5848         int rc = 0;
5849
5850         if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT))
5851                 return 0;
5852
5853         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5854                 PMD_DRV_LOG(DEBUG,
5855                             "Not a PF or trusted VF. Command not supported\n");
5856                 return 0;
5857         }
5858
5859         HWRM_PREP(&req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_CHIMP_MB);
5860         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5861
5862         HWRM_CHECK_RESULT();
5863         flags = rte_le_to_cpu_32(resp->flags);
5864         HWRM_UNLOCK();
5865
5866         if (flags & HWRM_CFA_ADV_FLOW_MGNT_QCAPS_RFS_RING_TBL_IDX_V2_SUPPORTED)
5867                 bp->flags |= BNXT_FLAG_FLOW_CFA_RFS_RING_TBL_IDX_V2;
5868         else
5869                 bp->flags |= BNXT_FLAG_RFS_NEEDS_VNIC;
5870
5871         return rc;
5872 }