/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>
#include <rte_io.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define HWRM_SPEC_CODE_1_8_3            0x10803
#define HWRM_VERSION_1_9_1              0x10901
#define HWRM_VERSION_1_9_2              0x10903

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

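/*
 * Descriptive note (added for clarity): return the log2 of the smallest
 * supported backing-store page size (16 B up to 1 GB) that can hold 'size'
 * bytes; page_roundup() turns that exponent into the page size itself.
 */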
static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}

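/*
 * Descriptive note (added for clarity): fill the page attribute/directory
 * fields of a ring allocation request. With more than one backing page,
 * point the firmware at the page table (one level of indirection,
 * *pg_attr = 1); otherwise pass the lone data page directly.
 */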
static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
                                  uint8_t *pg_attr,
                                  uint64_t *pg_dir)
{
        if (rmem->nr_pages > 1) {
                *pg_attr = 1;
                *pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
        } else {
                *pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
        }
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return 0 on success, -ETIMEDOUT (-110)
 * if the HWRM command times out, or a negative error code if the HWRM
 * command is failed by the FW.
 */
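
/*
 * Illustrative example (not part of the upstream driver): callers treat
 * the bnxt_hwrm_*() return value as a negative errno. For instance:
 *
 *	rc = bnxt_hwrm_func_reset(bp);
 *	if (rc == -ETIMEDOUT) {
 *		// firmware did not answer within bp->hwrm_cmd_timeout
 *	} else if (rc) {
 *		// command failed by the firmware, rc is a negative errno
 *	}
 */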

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                  uint32_t msg_len, bool use_kong_mb)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };
        uint16_t bar_offset = use_kong_mb ?
                GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
        uint16_t mb_trigger_offset = use_kong_mb ?
                GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
        uint32_t timeout;

        /* Do not send HWRM commands to firmware in error state */
        if (bp->flags & BNXT_FLAG_FATAL_ERROR)
                return 0;

        timeout = bp->hwrm_cmd_timeout;

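        /*
         * Short command mode: if the firmware mandates it, or if the
         * request exceeds the mailbox size, stage the full request in
         * DMA memory and write only a small hwrm_short_input descriptor
         * pointing at it through the BAR.
         */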
        if (bp->flags & BNXT_FLAG_SHORT_CMD ||
            msg_len > bp->max_req_len) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
        rte_write32(1, bar);
        /*
         * Make sure the channel doorbell ring command completes before
         * reading the response to avoid getting stale or invalid
         * responses.
         */
        rte_io_mb();

        /* Poll for the valid bit */
        for (i = 0; i < timeout; i++) {
                /* Sanity check on the resp->resp_len */
                rte_cio_rmb();
                if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(1);
        }

        if (i >= timeout) {
                /* Suppress VER_GET timeout messages during reset recovery */
                if (bp->flags & BNXT_FLAG_FW_RESET &&
                    rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
                        return -ETIMEDOUT;

                PMD_DRV_LOG(ERR,
                            "Error(timeout) sending msg 0x%04x, seq_id %d\n",
                            req->req_type, req->seq_id);
                return -ETIMEDOUT;
        }
        return 0;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() returns from the enclosing function on failure,
 * releasing the spinlock only on that error path. If the enclosing function
 * does not use the regular int return codes, HWRM_CHECK_RESULT() should not
 * be used directly; instead, copy and modify it to suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed;
 * see the usage sketch after the macro definitions below.
 */
#define HWRM_PREP(req, type, kong) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        if (bp->hwrm_cmd_resp_addr == NULL) { \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return -EACCES; \
        } \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        (req)->req_type = rte_cpu_to_le_16(type); \
        (req)->cmpl_ring = rte_cpu_to_le_16(-1); \
        (req)->seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
                rte_cpu_to_le_16(bp->chimp_cmd_seq++); \
        (req)->target_id = rte_cpu_to_le_16(0xffff); \
        (req)->resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT_SILENT() do {\
        if (rc) { \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_CHECK_RESULT() do {\
        if (rc) { \
                PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
                        rc = -ENOSPC; \
                else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
                        rc = -EINVAL; \
                else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
                        rc = -ENOTSUP; \
                else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
                        rc = -EAGAIN; \
                else if (rc > 0) \
                        rc = -EIO; \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        PMD_DRV_LOG(ERR, \
                                "error %d:%d:%08x:%04x\n", \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        PMD_DRV_LOG(ERR, "error %d\n", rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
                        rc = -ENOSPC; \
                else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
                        rc = -EINVAL; \
                else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
                        rc = -ENOTSUP; \
                else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
                        rc = -EAGAIN; \
                else if (rc > 0) \
                        rc = -EIO; \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)
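
/*
 * Illustrative sketch (not part of the upstream driver) of the canonical
 * pattern the bnxt_hwrm_*() wrappers below follow:
 *
 *	struct hwrm_func_reset_input req = {.req_type = 0 };
 *	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
 *	int rc;
 *
 *	HWRM_PREP(&req, HWRM_FUNC_RESET, BNXT_USE_CHIMP_MB);
 *	// ... fill request fields ...
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 *	HWRM_CHECK_RESULT();    // on error: unlocks and returns rc
 *	// ... read *resp while the spinlock is still held ...
 *	HWRM_UNLOCK();          // mandatory on the success path
 *	return rc;
 */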

int bnxt_hwrm_tf_message_direct(struct bnxt *bp,
                                bool use_kong_mb,
                                uint16_t msg_type,
                                void *msg,
                                uint32_t msg_len,
                                void *resp_msg,
                                uint32_t resp_len)
{
        int rc = 0;
        bool mailbox = BNXT_USE_CHIMP_MB;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;

        if (use_kong_mb)
                mailbox = BNXT_USE_KONG(bp);

        HWRM_PREP(req, msg_type, mailbox);

        rc = bnxt_hwrm_send_message(bp, req, msg_len, mailbox);

        HWRM_CHECK_RESULT();

        if (resp_msg)
                memcpy(resp_msg, resp, resp_len);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_tf_message_tunneled(struct bnxt *bp,
                                  bool use_kong_mb,
                                  uint16_t tf_type,
                                  uint16_t tf_subtype,
                                  uint32_t *tf_response_code,
                                  void *msg,
                                  uint32_t msg_len,
                                  void *response,
                                  uint32_t response_len)
{
        int rc = 0;
        struct hwrm_cfa_tflib_input req = { .req_type = 0 };
        struct hwrm_cfa_tflib_output *resp = bp->hwrm_cmd_resp_addr;
        bool mailbox = BNXT_USE_CHIMP_MB;

        if (msg_len > sizeof(req.tf_req))
                return -ENOMEM;

        if (use_kong_mb)
                mailbox = BNXT_USE_KONG(bp);

        HWRM_PREP(&req, HWRM_TF, mailbox);
        /* Build request using the user supplied request payload.
         * TLV request size is checked at build time against HWRM
         * request max size, thus no checking required.
         */
        req.tf_type = tf_type;
        req.tf_subtype = tf_subtype;
        memcpy(req.tf_req, msg, msg_len);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), mailbox);
        HWRM_CHECK_RESULT();

        /* Copy only the 'payload' to the user provided response buffer:
         * the HWRM data structure is HWRM header + msg header + payload,
         * while TFLIB only provided a payload placeholder.
         */
        if (response != NULL && response_len != 0)
                memcpy(response, resp->tf_resp, response_len);

        /* Extract the internal tflib response code */
        *tf_response_code = resp->tf_resp_code;
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;

        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;

        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        } else if (vnic->flags & BNXT_VNIC_INFO_MCAST) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr =
                        rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the set_rx_mask
         * list was used for anti-spoof. In 1.8.0, the TX path configuration was
         * removed from the set_rx_mask call, and this command was added.
         *
         * This command is also present from 1.7.8.11 and higher,
         * as well as 1.7.8.0
         */
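        /*
         * bnxt_hwrm_ver_get() packs the firmware version one byte per
         * component: (major << 24) | (minor << 16) | (build << 8) | rsvd.
         * So 1.8.0 is 0x01080000 and 1.7.8.11 is 0x0107080b below.
         */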
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(&req, HWRM_CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

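/*
 * Descriptive note (added for clarity): L2 filters can be shared between
 * flows via matching_l2_fltr_ptr and are reference counted. The firmware
 * filter is freed, and the filter unlinked from its VNIC, only when the
 * last reference is dropped.
 */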
int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                             struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct bnxt_filter_info *l2_filter = filter;
        struct bnxt_vnic_info *vnic = NULL;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        if (filter->matching_l2_fltr_ptr)
                l2_filter = filter->matching_l2_fltr_ptr;

        PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
                    filter, l2_filter, l2_filter->l2_ref_cnt);

        if (l2_filter->l2_ref_cnt == 0)
                return 0;

        if (l2_filter->l2_ref_cnt > 0)
                l2_filter->l2_ref_cnt--;

        if (l2_filter->l2_ref_cnt > 0)
                return 0;

        HWRM_PREP(&req, HWRM_CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = UINT64_MAX;
        if (l2_filter->l2_ref_cnt == 0) {
                vnic = l2_filter->vnic;
                if (vnic) {
                        STAILQ_REMOVE(&vnic->filter, l2_filter,
                                      bnxt_filter_info, next);
                        bnxt_free_filter(bp, l2_filter);
                }
        }

        return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        /* TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ */
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                PMD_DRV_LOG(DEBUG,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(&req, HWRM_CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;
        if (filter->pri_hint) {
                req.pri_hint = filter->pri_hint;
                req.l2_filter_id_hint =
                        rte_cpu_to_le_64(filter->l2_filter_id_hint);
        }

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
        HWRM_UNLOCK();

        filter->l2_ref_cnt++;

        return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(&req, HWRM_PORT_MAC_CFG, BNXT_USE_CHIMP_MB);

        if (ptp->rx_filter)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables = rte_cpu_to_le_32
                (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

        if (ptp)
                return 0;

        HWRM_PREP(&req, HWRM_PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);

        req.port_id = rte_cpu_to_le_16(bp->pf->port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        /* HWRM_CHECK_RESULT() leaves the spinlock held on success, so
         * every return path below must release it with HWRM_UNLOCK().
         */
        if (!BNXT_CHIP_THOR(bp) &&
            !(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) {
                HWRM_UNLOCK();
                return 0;
        }

        if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
                bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }

        if (!BNXT_CHIP_THOR(bp)) {
                ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
                ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
                ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
                ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
                ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
                ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
                ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
                ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
                ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
        }

        HWRM_UNLOCK();

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        return 0;
}

void bnxt_hwrm_free_vf_info(struct bnxt *bp)
{
        int i;

        for (i = 0; i < bp->pf->max_vfs; i++) {
                rte_free(bp->pf->vf_info[i].vlan_table);
                bp->pf->vf_info[i].vlan_table = NULL;
                rte_free(bp->pf->vf_info[i].vlan_as_table);
                bp->pf->vf_info[i].vlan_as_table = NULL;
        }
        rte_free(bp->pf->vf_info);
        bp->pf->vf_info = NULL;
}

static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        bool ptp_supported = false;
        int i;

        HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf->port_id = resp->port_id;
                bp->pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                bp->pf->total_vfs = rte_le_to_cpu_16(resp->max_vfs);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf->max_vfs) {
                        if (bp->pf->vf_info)
                                bnxt_hwrm_free_vf_info(bp);
                        bp->pf->vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf->vf_info[0]) * new_max_vfs, 0);
                        bp->pf->max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf->vf_info[i].fid =
                                        bp->pf->first_vf_id + i;
                                bp->pf->vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf->vf_info[i].vlan_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf->vf_info[i].vlan_table);
                                bp->pf->vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf->vf_info[i].vlan_as_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN AS table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                              bp->pf->vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf->vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        if (!bnxt_check_zero_bytes(resp->mac_address, RTE_ETHER_ADDR_LEN)) {
                bp->flags |= BNXT_FLAG_DFLT_MAC_SET;
                memcpy(bp->mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
        } else {
                bp->flags &= ~BNXT_FLAG_DFLT_MAC_SET;
        }
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
        bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        if (!BNXT_CHIP_THOR(bp))
                bp->max_l2_ctx += bp->max_rx_em_flows;
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf->max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        PMD_DRV_LOG(DEBUG, "Max l2_cntxts is %d vnics is %d\n",
                    bp->max_l2_ctx, bp->max_vnics);
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
                bp->pf->total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
                        ptp_supported = true;
                }
        }

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
                bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
                bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
                PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
        }

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
                bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
                bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;

        HWRM_UNLOCK();

        /* Query the PTP configuration only after dropping the HWRM lock:
         * bnxt_hwrm_ptp_qcfg() issues its own HWRM command and manages
         * the lock itself.
         */
        if (ptp_supported)
                bnxt_hwrm_ptp_qcfg(bp);

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc;

        rc = __bnxt_hwrm_func_qcaps(bp);
        if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
                rc = bnxt_alloc_ctx_mem(bp);
                if (rc)
                        return rc;

                rc = bnxt_hwrm_func_resc_qcaps(bp);
                if (!rc)
                        bp->flags |= BNXT_FLAG_NEW_RM;
        }

        /*
         * On older FW, bnxt_hwrm_func_resc_qcaps can fail and cause init
         * failure. But the error can be ignored. Return success.
         */

        return 0;
}

/* VNIC cap covers capability of all VNICs. So no need to pass vnic_id */
int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_vnic_qcaps_input req = {.req_type = 0 };
        struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(&req, HWRM_VNIC_QCAPS, BNXT_USE_CHIMP_MB);

        req.target_id = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (rte_le_to_cpu_32(resp->flags) &
            HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) {
                bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY;
                PMD_DRV_LOG(INFO, "CoS assignment capability enabled\n");
        }

        bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(&req, HWRM_FUNC_RESET, BNXT_USE_CHIMP_MB);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        uint32_t flags = 0;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
                flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
        if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;

        /* PFs and trusted VFs should indicate support for the Master
         * capability on non-Stingray platforms.
         */
        if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;

        HWRM_PREP(&req, HWRM_FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf->vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf->vf_req_fwd)));

                /*
                 * PF can sniff HWRM API issued by VF. This can be set up by
                 * linux driver and inherited by the DPDK PF driver. Clear
                 * this HWRM sniffer list in FW because DPDK PF driver does
                 * not support this.
                 */
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE;
        }

        req.flags = rte_cpu_to_le_32(flags);

        req.async_event_fwd[0] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
        if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
                req.async_event_fwd[0] |=
                        rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
        req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
                                 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
        if (BNXT_PF(bp))
                req.async_event_fwd[1] |=
                        rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        flags = rte_le_to_cpu_32(resp->flags);
        if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
                bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;

        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
{
        if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
                return 0;

        return bnxt_hwrm_func_reserve_vf_resc(bp, true);
}

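/*
 * Descriptive note (added for clarity, based on how this function is used):
 * when 'test' is set, the *_ASSETS_TEST flags request a validation-only
 * pass from the firmware, with no reservation expected to be committed;
 * bnxt_hwrm_check_vf_rings() uses this to probe, and failures are then
 * reported silently.
 */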
int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
{
        int rc;
        uint32_t flags = 0;
        uint32_t enables;
        struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_vf_cfg_input req = {0};

        HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);

        enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;

        if (BNXT_HAS_RING_GRPS(bp)) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
                req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
        }

        req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
        req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
                                            AGG_RING_MULTIPLIER);
        req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
                                              bp->tx_nr_rings +
                                              BNXT_NUM_ASYNC_CPR(bp));
        req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
        if (bp->vf_resv_strategy ==
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
                           HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
                           HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
                req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
                req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
                req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
        } else if (bp->vf_resv_strategy ==
                   HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MAXIMAL) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
                req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
        }

        if (test)
                flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;

        /* The ring-group asset test only applies to chips with ring groups */
        if (test && BNXT_HAS_RING_GRPS(bp))
                flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;

        req.flags = rte_cpu_to_le_32(flags);
        req.enables |= rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (test)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_resource_qcaps_input req = {0};

        HWRM_PREP(&req, HWRM_FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT_SILENT();

        if (BNXT_VF(bp)) {
                bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
                bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
                bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
                bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
                bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
                /* func_resource_qcaps does not return max_rx_em_flows.
                 * So use the value provided by func_qcaps.
                 */
                bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
                if (!BNXT_CHIP_THOR(bp))
                        bp->max_l2_ctx += bp->max_rx_em_flows;
                bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
                bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        }
        bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
        bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
        if (bp->vf_resv_strategy >
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
                bp->vf_resv_strategy =
                HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        bp->hwrm_cmd_timeout = timeout;
        HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (bp->flags & BNXT_FLAG_FW_RESET)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
                resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
                resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
        bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
                     (resp->hwrm_fw_min_8b << 16) |
                     (resp->hwrm_fw_bld_8b << 8) |
                     resp->hwrm_fw_rsvd_8b;
        PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        fw_version = resp->hwrm_intf_maj_8b << 16;
        fw_version |= resp->hwrm_intf_min_8b << 8;
        fw_version |= resp->hwrm_intf_upd_8b;
        bp->hwrm_spec_code = fw_version;
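        /*
         * Example: an interface version of 1.8.3 packs to 0x10803 here,
         * matching HWRM_SPEC_CODE_1_8_3 above.
         */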

        /* def_req_timeout value is in milliseconds */
        bp->hwrm_cmd_timeout = rte_le_to_cpu_16(resp->def_req_timeout);
        /* convert timeout to usec */
        bp->hwrm_cmd_timeout *= 1000;
        if (!bp->hwrm_cmd_timeout)
                bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;

        if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                PMD_DRV_LOG(ERR, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
        if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
                bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;

        max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT,
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                bp->hwrm_cmd_resp_dma_addr =
                        rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                PMD_DRV_LOG(DEBUG, "Short command supported\n");
                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

        if (((dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
             (dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
            bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
                sprintf(type, "bnxt_hwrm_short_" PCI_PRI_FMT,
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr =
                                rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
                bp->flags |= BNXT_FLAG_KONG_MB_EN;
                PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
                PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
                bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_MGMT;
                PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
        }

        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED) {
                PMD_DRV_LOG(DEBUG, "FW supports advanced flow counters\n");
                bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_COUNTERS;
        }

error:
        HWRM_UNLOCK();
        return rc;
}
1214
1215 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
1216 {
1217         int rc;
1218         struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
1219         struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
1220
1221         if (!(bp->flags & BNXT_FLAG_REGISTERED))
1222                 return 0;
1223
1224         HWRM_PREP(&req, HWRM_FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
1225         req.flags = flags;
1226
1227         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1228
1229         HWRM_CHECK_RESULT();
1230         HWRM_UNLOCK();
1231
1232         return rc;
1233 }
1234
1235 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
1236 {
1237         int rc = 0;
1238         struct hwrm_port_phy_cfg_input req = {0};
1239         struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1240         uint32_t enables = 0;
1241
1242         HWRM_PREP(&req, HWRM_PORT_PHY_CFG, BNXT_USE_CHIMP_MB);
1243
1244         if (conf->link_up) {
1245                 /* Setting Fixed Speed. But AutoNeg is ON, So disable it */
1246                 if (bp->link_info->auto_mode && conf->link_speed) {
1247                         req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
1248                         PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
1249                 }
1250
1251                 req.flags = rte_cpu_to_le_32(conf->phy_flags);
1252                 req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
1253                 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
1254                 /*
1255                  * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
1256                  * any auto mode, even "none".
1257                  */
1258                 if (!conf->link_speed) {
1259                         /* No speeds specified. Enable AutoNeg - all speeds */
1260                         req.auto_mode =
1261                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
1262                 }
1263                 /* AutoNeg - Advertise speeds specified. */
1264                 if (conf->auto_link_speed_mask &&
1265                     !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
1266                         req.auto_mode =
1267                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
1268                         req.auto_link_speed_mask =
1269                                 conf->auto_link_speed_mask;
1270                         enables |=
1271                         HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
1272                 }
1273
1274                 req.auto_duplex = conf->duplex;
1275                 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
1276                 req.auto_pause = conf->auto_pause;
1277                 req.force_pause = conf->force_pause;
1278                 /* Set force_pause if there is no auto or if there is a force */
1279                 if (req.auto_pause && !req.force_pause)
1280                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
1281                 else
1282                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
1283
1284                 req.enables = rte_cpu_to_le_32(enables);
1285         } else {
1286                 req.flags =
1287                 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
1288                 PMD_DRV_LOG(INFO, "Force Link Down\n");
1289         }
1290
1291         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1292
1293         HWRM_CHECK_RESULT();
1294         HWRM_UNLOCK();
1295
1296         return rc;
1297 }
1298
1299 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
1300                                    struct bnxt_link_info *link_info)
1301 {
1302         int rc = 0;
1303         struct hwrm_port_phy_qcfg_input req = {0};
1304         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1305
1306         HWRM_PREP(&req, HWRM_PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);
1307
1308         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1309
1310         HWRM_CHECK_RESULT();
1311
1312         link_info->phy_link_status = resp->link;
1313         link_info->link_up =
1314                 (link_info->phy_link_status ==
1315                  HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
1316         link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
1317         link_info->duplex = resp->duplex_cfg;
1318         link_info->pause = resp->pause;
1319         link_info->auto_pause = resp->auto_pause;
1320         link_info->force_pause = resp->force_pause;
1321         link_info->auto_mode = resp->auto_mode;
1322         link_info->phy_type = resp->phy_type;
1323         link_info->media_type = resp->media_type;
1324
1325         link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
1326         link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
1327         link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
1328         link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
1329         link_info->phy_ver[0] = resp->phy_maj;
1330         link_info->phy_ver[1] = resp->phy_min;
1331         link_info->phy_ver[2] = resp->phy_bld;
1332
1333         HWRM_UNLOCK();
1334
1335         PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
1336         PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
1337         PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
1338         PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
1339         PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
1340                     link_info->auto_link_speed_mask);
1341         PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
1342                     link_info->force_link_speed);
1343
1344         return rc;
1345 }
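/*
 * Illustrative sketch (hypothetical helper, not called by the driver):
 * refresh PHY state with the query above and report link status.
 */
static bool __rte_unused
bnxt_example_link_is_up(struct bnxt *bp)
{
        struct bnxt_link_info link = { 0 };

        if (bnxt_hwrm_port_phy_qcfg(bp, &link) != 0)
                return false;
        return link.link_up != 0;
}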
1346
1347 int bnxt_hwrm_port_phy_qcaps(struct bnxt *bp)
1348 {
1349         int rc = 0;
1350         struct hwrm_port_phy_qcaps_input req = {0};
1351         struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1352
1353         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
1354                 return 0;
1355
1356         HWRM_PREP(&req, HWRM_PORT_PHY_QCAPS, BNXT_USE_CHIMP_MB);
1357
1358         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1359
1360         HWRM_CHECK_RESULT();
1361
1362         bp->port_cnt = resp->port_cnt;
1363
1364         HWRM_UNLOCK();
1365
1366         return 0;
1367 }
1368
1369 static bool bnxt_find_lossy_profile(struct bnxt *bp)
1370 {
1371         int i = 0;
1372
1373         for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
1374                 if (bp->tx_cos_queue[i].profile ==
1375                     HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
1376                         bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
1377                         return true;
1378                 }
1379         }
1380         return false;
1381 }
1382
1383 static void bnxt_find_first_valid_profile(struct bnxt *bp)
1384 {
1385         int i = 0;
1386
1387         for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
1388                 if (bp->tx_cos_queue[i].profile !=
1389                     HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN &&
1390                     bp->tx_cos_queue[i].id !=
1391                     HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN) {
1392                         bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
1393                         break;
1394                 }
1395         }
1396 }
1397
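/*
 * Query the firmware CoS queue configuration. The TX direction is
 * queried first; the request is then repeated for the RX direction
 * via the get_rx_info label below.
 */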
1398 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
1399 {
1400         int rc = 0;
1401         struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
1402         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
1403         uint32_t dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
1404         int i;
1405
1406 get_rx_info:
1407         HWRM_PREP(&req, HWRM_QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
1408
1409         req.flags = rte_cpu_to_le_32(dir);
1410         /* Set drv_qmap_cap on HWRM >= 1.9.1 when COS classification is unused. */
1411         if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1 &&
1412             !(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
1413                 req.drv_qmap_cap =
1414                         HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
1415         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1416
1417         HWRM_CHECK_RESULT();
1418
1419         if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
1420                 GET_TX_QUEUE_INFO(0);
1421                 GET_TX_QUEUE_INFO(1);
1422                 GET_TX_QUEUE_INFO(2);
1423                 GET_TX_QUEUE_INFO(3);
1424                 GET_TX_QUEUE_INFO(4);
1425                 GET_TX_QUEUE_INFO(5);
1426                 GET_TX_QUEUE_INFO(6);
1427                 GET_TX_QUEUE_INFO(7);
1428         } else {
1429                 GET_RX_QUEUE_INFO(0);
1430                 GET_RX_QUEUE_INFO(1);
1431                 GET_RX_QUEUE_INFO(2);
1432                 GET_RX_QUEUE_INFO(3);
1433                 GET_RX_QUEUE_INFO(4);
1434                 GET_RX_QUEUE_INFO(5);
1435                 GET_RX_QUEUE_INFO(6);
1436                 GET_RX_QUEUE_INFO(7);
1437         }
1438
1439         HWRM_UNLOCK();
1440
1441         if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX)
1442                 goto done;
1443
1444         if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
1445                 bp->tx_cosq_id[0] = bp->tx_cos_queue[0].id;
1446         } else {
1447                 int j;
1448
1449                 /* iterate and find the COSq profile to use for Tx */
1450                 if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
1451                         for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
1452                                 if (bp->tx_cos_queue[i].id != 0xff)
1453                                         bp->tx_cosq_id[j++] =
1454                                                 bp->tx_cos_queue[i].id;
1455                         }
1456                 } else {
1457                         /* When CoS classification is disabled, prefer a
1458                          * LOSSY profile for normal NIC operation; if none
1459                          * is found, fall back to the first valid profile.
1460                          */
1461                         if (!bnxt_find_lossy_profile(bp))
1462                                 bnxt_find_first_valid_profile(bp);
1463
1464                 }
1465         }
1466
1467         bp->max_tc = resp->max_configurable_queues;
1468         bp->max_lltc = resp->max_configurable_lossless_queues;
1469         if (bp->max_tc > BNXT_MAX_QUEUE)
1470                 bp->max_tc = BNXT_MAX_QUEUE;
1471         bp->max_q = bp->max_tc;
1472
1473         if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
1474                 dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX;
1475                 goto get_rx_info;
1476         }
1477
1478 done:
1479         return rc;
1480 }
1481
1482 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
1483                          struct bnxt_ring *ring,
1484                          uint32_t ring_type, uint32_t map_index,
1485                          uint32_t stats_ctx_id, uint32_t cmpl_ring_id,
1486                          uint16_t tx_cosq_id)
1487 {
1488         int rc = 0;
1489         uint32_t enables = 0;
1490         struct hwrm_ring_alloc_input req = {.req_type = 0 };
1491         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1492         struct rte_mempool *mb_pool;
1493         uint16_t rx_buf_size;
1494
1495         HWRM_PREP(&req, HWRM_RING_ALLOC, BNXT_USE_CHIMP_MB);
1496
1497         req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
1498         req.fbo = rte_cpu_to_le_32(0);
1499         /* Association of ring index with doorbell index */
1500         req.logical_id = rte_cpu_to_le_16(map_index);
1501         req.length = rte_cpu_to_le_32(ring->ring_size);
1502
1503         switch (ring_type) {
1504         case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1505                 req.ring_type = ring_type;
1506                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1507                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1508                 req.queue_id = rte_cpu_to_le_16(tx_cosq_id);
1509                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1510                         enables |=
1511                         HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1512                 break;
1513         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1514                 req.ring_type = ring_type;
1515                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1516                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1517                 if (BNXT_CHIP_THOR(bp)) {
1518                         mb_pool = bp->rx_queues[0]->mb_pool;
1519                         rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1520                                       RTE_PKTMBUF_HEADROOM;
1521                         rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1522                         req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1523                         enables |=
1524                                 HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
1525                 }
1526                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1527                         enables |=
1528                                 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1529                 break;
1530         case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1531                 req.ring_type = ring_type;
1532                 if (BNXT_HAS_NQ(bp)) {
1533                         /* Association of cp ring with nq */
1534                         req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1535                         enables |=
1536                                 HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
1537                 }
1538                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1539                 break;
1540         case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1541                 req.ring_type = ring_type;
1542                 req.page_size = BNXT_PAGE_SHFT;
1543                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1544                 break;
1545         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1546                 req.ring_type = ring_type;
1547                 req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);
1548
1549                 mb_pool = bp->rx_queues[0]->mb_pool;
1550                 rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1551                               RTE_PKTMBUF_HEADROOM;
1552                 rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1553                 req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1554
1555                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1556                 enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
1557                            HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
1558                            HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1559                 break;
1560         default:
1561                 PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
1562                         ring_type);
1563                 HWRM_UNLOCK();
1564                 return -EINVAL;
1565         }
1566         req.enables = rte_cpu_to_le_32(enables);
1567
1568         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1569
1570         if (rc || resp->error_code) {
1571                 if (rc == 0 && resp->error_code)
1572                         rc = rte_le_to_cpu_16(resp->error_code);
1573                 switch (ring_type) {
1574                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1575                         PMD_DRV_LOG(ERR,
1576                                 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
1577                         HWRM_UNLOCK();
1578                         return rc;
1579                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1580                         PMD_DRV_LOG(ERR,
1581                                     "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1582                         HWRM_UNLOCK();
1583                         return rc;
1584                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1585                         PMD_DRV_LOG(ERR,
1586                                     "hwrm_ring_alloc rx agg failed. rc:%d\n",
1587                                     rc);
1588                         HWRM_UNLOCK();
1589                         return rc;
1590                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1591                         PMD_DRV_LOG(ERR,
1592                                     "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1593                         HWRM_UNLOCK();
1594                         return rc;
1595                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1596                         PMD_DRV_LOG(ERR,
1597                                     "hwrm_ring_alloc nq failed. rc:%d\n", rc);
1598                         HWRM_UNLOCK();
1599                         return rc;
1600                 default:
1601                         PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1602                         HWRM_UNLOCK();
1603                         return rc;
1604                 }
1605         }
1606
1607         ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1608         HWRM_UNLOCK();
1609         return rc;
1610 }
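/*
 * Illustrative sketch (hypothetical helper, not called by the driver):
 * allocate a TX ring bound to the queue's completion ring and stats
 * context, using the first TX CoS queue ID, mirroring how the ring
 * code is expected to drive the helper above.
 */
static int __rte_unused
bnxt_example_tx_ring_alloc(struct bnxt *bp, struct bnxt_tx_queue *txq,
                           unsigned int idx)
{
        struct bnxt_cp_ring_info *cpr = txq->cp_ring;

        return bnxt_hwrm_ring_alloc(bp, txq->tx_ring->tx_ring_struct,
                                    HWRM_RING_ALLOC_INPUT_RING_TYPE_TX, idx,
                                    cpr->hw_stats_ctx_id,
                                    cpr->cp_ring_struct->fw_ring_id,
                                    bp->tx_cosq_id[0]);
}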
1611
1612 int bnxt_hwrm_ring_free(struct bnxt *bp,
1613                         struct bnxt_ring *ring, uint32_t ring_type)
1614 {
1615         int rc;
1616         struct hwrm_ring_free_input req = {.req_type = 0 };
1617         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1618
1619         HWRM_PREP(&req, HWRM_RING_FREE, BNXT_USE_CHIMP_MB);
1620
1621         req.ring_type = ring_type;
1622         req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1623
1624         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1625
1626         if (rc || resp->error_code) {
1627                 if (rc == 0 && resp->error_code)
1628                         rc = rte_le_to_cpu_16(resp->error_code);
1629                 HWRM_UNLOCK();
1630
1631                 switch (ring_type) {
1632                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1633                         PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1634                                 rc);
1635                         return rc;
1636                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1637                         PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1638                                 rc);
1639                         return rc;
1640                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1641                         PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1642                                 rc);
1643                         return rc;
1644                 case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
1645                         PMD_DRV_LOG(ERR,
1646                                     "hwrm_ring_free nq failed. rc:%d\n", rc);
1647                         return rc;
1648                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
1649                         PMD_DRV_LOG(ERR,
1650                                     "hwrm_ring_free agg failed. rc:%d\n", rc);
1651                         return rc;
1652                 default:
1653                         PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1654                         return rc;
1655                 }
1656         }
1657         HWRM_UNLOCK();
1658         return 0;
1659 }
1660
1661 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1662 {
1663         int rc = 0;
1664         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1665         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1666
1667         HWRM_PREP(&req, HWRM_RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
1668
1669         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1670         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1671         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1672         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1673
1674         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1675
1676         HWRM_CHECK_RESULT();
1677
1678         bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id);
1679
1680         HWRM_UNLOCK();
1681
1682         return rc;
1683 }
1684
1685 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1686 {
1687         int rc;
1688         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1689         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1690
1691         HWRM_PREP(&req, HWRM_RING_GRP_FREE, BNXT_USE_CHIMP_MB);
1692
1693         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1694
1695         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1696
1697         HWRM_CHECK_RESULT();
1698         HWRM_UNLOCK();
1699
1700         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1701         return rc;
1702 }
1703
1704 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1705 {
1706         int rc = 0;
1707         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1708         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1709
1710         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1711                 return rc;
1712
1713         HWRM_PREP(&req, HWRM_STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
1714
1715         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1716
1717         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1718
1719         HWRM_CHECK_RESULT();
1720         HWRM_UNLOCK();
1721
1722         return rc;
1723 }
1724
1725 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1726                                 unsigned int idx __rte_unused)
1727 {
1728         int rc;
1729         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1730         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1731
1732         HWRM_PREP(&req, HWRM_STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1733
1734         req.update_period_ms = rte_cpu_to_le_32(0);
1735
1736         req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map);
1737
1738         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1739
1740         HWRM_CHECK_RESULT();
1741
1742         cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);
1743
1744         HWRM_UNLOCK();
1745
1746         return rc;
1747 }
1748
1749 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1750                                 unsigned int idx __rte_unused)
1751 {
1752         int rc;
1753         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1754         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1755
1756         HWRM_PREP(&req, HWRM_STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
1757
1758         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1759
1760         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1761
1762         HWRM_CHECK_RESULT();
1763         HWRM_UNLOCK();
1764
1765         return rc;
1766 }
1767
1768 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1769 {
1770         int rc = 0, i, j;
1771         struct hwrm_vnic_alloc_input req = { 0 };
1772         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1773
1774         if (!BNXT_HAS_RING_GRPS(bp))
1775                 goto skip_ring_grps;
1776
1777         /* map ring groups to this vnic */
1778         PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1779                 vnic->start_grp_id, vnic->end_grp_id);
1780         for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
1781                 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1782
1783         vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1784         vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1785         vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1786         vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1787
1788 skip_ring_grps:
1789         vnic->mru = BNXT_VNIC_MRU(bp->eth_dev->data->mtu);
1790         HWRM_PREP(&req, HWRM_VNIC_ALLOC, BNXT_USE_CHIMP_MB);
1791
1792         if (vnic->func_default)
1793                 req.flags =
1794                         rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1795         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1796
1797         HWRM_CHECK_RESULT();
1798
1799         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1800         HWRM_UNLOCK();
1801         PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1802         return rc;
1803 }
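/*
 * Illustrative sketch (hypothetical helper, not called by the driver):
 * the typical VNIC bring-up order: allocate the VNIC, allocate an RSS
 * context, apply the configuration, then program RSS. Error unwinding
 * (bnxt_hwrm_vnic_free() and friends) is omitted for brevity.
 */
static int __rte_unused
bnxt_example_vnic_bringup(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc;

        rc = bnxt_hwrm_vnic_alloc(bp, vnic);
        if (rc)
                return rc;
        rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
        if (rc)
                return rc;
        rc = bnxt_hwrm_vnic_cfg(bp, vnic);
        if (rc)
                return rc;
        return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
}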
1804
1805 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1806                                         struct bnxt_vnic_info *vnic,
1807                                         struct bnxt_plcmodes_cfg *pmode)
1808 {
1809         int rc = 0;
1810         struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1811         struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1812
1813         HWRM_PREP(&req, HWRM_VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
1814
1815         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1816
1817         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1818
1819         HWRM_CHECK_RESULT();
1820
1821         pmode->flags = rte_le_to_cpu_32(resp->flags);
1822         /* dflt_vnic bit doesn't exist in the _cfg command */
1823         pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1824         pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1825         pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1826         pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1827
1828         HWRM_UNLOCK();
1829
1830         return rc;
1831 }
1832
1833 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1834                                        struct bnxt_vnic_info *vnic,
1835                                        struct bnxt_plcmodes_cfg *pmode)
1836 {
1837         int rc = 0;
1838         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1839         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1840
1841         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1842                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1843                 return rc;
1844         }
1845
1846         HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1847
1848         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1849         req.flags = rte_cpu_to_le_32(pmode->flags);
1850         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1851         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1852         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1853         req.enables = rte_cpu_to_le_32(
1854             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1855             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1856             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1857         );
1858
1859         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1860
1861         HWRM_CHECK_RESULT();
1862         HWRM_UNLOCK();
1863
1864         return rc;
1865 }
1866
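/*
 * Configure a VNIC. Placement modes are captured first and re-applied
 * afterwards, since HWRM_VNIC_CFG may reset them to their defaults.
 */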
1867 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1868 {
1869         int rc = 0;
1870         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1871         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1872         struct bnxt_plcmodes_cfg pmodes = { 0 };
1873         uint32_t ctx_enable_flag = 0;
1874         uint32_t enables = 0;
1875
1876         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1877                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1878                 return rc;
1879         }
1880
1881         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1882         if (rc)
1883                 return rc;
1884
1885         HWRM_PREP(&req, HWRM_VNIC_CFG, BNXT_USE_CHIMP_MB);
1886
1887         if (BNXT_CHIP_THOR(bp)) {
1888                 int dflt_rxq = vnic->start_grp_id;
1889                 struct bnxt_rx_ring_info *rxr;
1890                 struct bnxt_cp_ring_info *cpr;
1891                 struct bnxt_rx_queue *rxq;
1892                 int i;
1893
1894                 /*
1895                  * The first active receive ring is used as the VNIC
1896                  * default receive ring. If there are no active receive
1897                  * rings (all corresponding receive queues are stopped),
1898                  * the first receive ring is used.
1899                  */
1900                 for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
1901                         rxq = bp->eth_dev->data->rx_queues[i];
1902                         if (rxq->rx_started) {
1903                                 dflt_rxq = i;
1904                                 break;
1905                         }
1906                 }
1907
1908                 rxq = bp->eth_dev->data->rx_queues[dflt_rxq];
1909                 rxr = rxq->rx_ring;
1910                 cpr = rxq->cp_ring;
1911
1912                 req.default_rx_ring_id =
1913                         rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
1914                 req.default_cmpl_ring_id =
1915                         rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
1916                 enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
1917                           HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
1918                 goto config_mru;
1919         }
1920
1921         /* Only RSS is supported for now; COS and LB are TBD. */
1922         enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
1923         if (vnic->lb_rule != 0xffff)
1924                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1925         if (vnic->cos_rule != 0xffff)
1926                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1927         if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
1928                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1929                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1930         }
1931         if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
1932                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID;
1933                 req.queue_id = rte_cpu_to_le_16(vnic->cos_queue_id);
1934         }
1935
1936         enables |= ctx_enable_flag;
1937         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1938         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1939         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1940         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1941
1942 config_mru:
1943         req.enables = rte_cpu_to_le_32(enables);
1944         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1945         req.mru = rte_cpu_to_le_16(vnic->mru);
1946         /* Configure default VNIC only once. */
1947         if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
1948                 req.flags |=
1949                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1950                 bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
1951         }
1952         if (vnic->vlan_strip)
1953                 req.flags |=
1954                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1955         if (vnic->bd_stall)
1956                 req.flags |=
1957                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1958         if (vnic->roce_dual)
1959                 req.flags |= rte_cpu_to_le_32(
1960                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1961         if (vnic->roce_only)
1962                 req.flags |= rte_cpu_to_le_32(
1963                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1964         if (vnic->rss_dflt_cr)
1965                 req.flags |= rte_cpu_to_le_32(
1966                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1967
1968         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1969
1970         HWRM_CHECK_RESULT();
1971         HWRM_UNLOCK();
1972
1973         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1974
1975         return rc;
1976 }
1977
1978 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1979                 int16_t fw_vf_id)
1980 {
1981         int rc = 0;
1982         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1983         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1984
1985         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1986                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1987                 return rc;
1988         }
1989         HWRM_PREP(&req, HWRM_VNIC_QCFG, BNXT_USE_CHIMP_MB);
1990
1991         req.enables =
1992                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1993         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1994         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1995
1996         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1997
1998         HWRM_CHECK_RESULT();
1999
2000         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
2001         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
2002         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
2003         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
2004         vnic->mru = rte_le_to_cpu_16(resp->mru);
2005         vnic->func_default = rte_le_to_cpu_32(
2006                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
2007         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
2008                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
2009         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
2010                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
2011         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
2012                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
2013         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
2014                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
2015         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
2016                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
2017
2018         HWRM_UNLOCK();
2019
2020         return rc;
2021 }
2022
2023 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
2024                              struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
2025 {
2026         int rc = 0;
2027         uint16_t ctx_id;
2028         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
2029         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
2030                                                 bp->hwrm_cmd_resp_addr;
2031
2032         HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
2033
2034         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2035         HWRM_CHECK_RESULT();
2036
2037         ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
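        /* Thor has no ring groups: each context maps one ring table
         * pair and is stored in fw_grp_ids[]. On older chips, context
         * 0 carries the VNIC's RSS rule.
         */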
2038         if (!BNXT_HAS_RING_GRPS(bp))
2039                 vnic->fw_grp_ids[ctx_idx] = ctx_id;
2040         else if (ctx_idx == 0)
2041                 vnic->rss_rule = ctx_id;
2042
2043         HWRM_UNLOCK();
2044
2045         return rc;
2046 }
2047
2048 static
2049 int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
2050                              struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
2051 {
2052         int rc = 0;
2053         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
2054         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
2055                                                 bp->hwrm_cmd_resp_addr;
2056
2057         if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
2058                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
2059                 return rc;
2060         }
2061         HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
2062
2063         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
2064
2065         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2066
2067         HWRM_CHECK_RESULT();
2068         HWRM_UNLOCK();
2069
2070         return rc;
2071 }
2072
2073 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2074 {
2075         int rc = 0;
2076
2077         if (BNXT_CHIP_THOR(bp)) {
2078                 int j;
2079
2080                 for (j = 0; j < vnic->num_lb_ctxts; j++) {
2081                         rc = _bnxt_hwrm_vnic_ctx_free(bp,
2082                                                       vnic,
2083                                                       vnic->fw_grp_ids[j]);
2084                         vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
2085                 }
2086                 vnic->num_lb_ctxts = 0;
2087         } else {
2088                 rc = _bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
2089                 vnic->rss_rule = INVALID_HW_RING_ID;
2090         }
2091
2092         return rc;
2093 }
2094
2095 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2096 {
2097         int rc = 0;
2098         struct hwrm_vnic_free_input req = {.req_type = 0 };
2099         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
2100
2101         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2102                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
2103                 return rc;
2104         }
2105
2106         HWRM_PREP(&req, HWRM_VNIC_FREE, BNXT_USE_CHIMP_MB);
2107
2108         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2109
2110         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2111
2112         HWRM_CHECK_RESULT();
2113         HWRM_UNLOCK();
2114
2115         vnic->fw_vnic_id = INVALID_HW_RING_ID;
2116         /* Configure default VNIC again if necessary. */
2117         if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
2118                 bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;
2119
2120         return rc;
2121 }
2122
2123 static int
2124 bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2125 {
2126         int i;
2127         int rc = 0;
2128         int nr_ctxs = vnic->num_lb_ctxts;
2129         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2130         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2131
2132         for (i = 0; i < nr_ctxs; i++) {
2133                 HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2134
2135                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2136                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
2137                 req.hash_mode_flags = vnic->hash_mode;
2138
2139                 req.hash_key_tbl_addr =
2140                         rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2141
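                /* Each context covers one HW_HASH_INDEX_SIZE slice of
                 * the shared RSS indirection table.
                 */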
2142                 req.ring_grp_tbl_addr =
2143                         rte_cpu_to_le_64(vnic->rss_table_dma_addr +
2144                                          i * HW_HASH_INDEX_SIZE);
2145                 req.ring_table_pair_index = i;
2146                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
2147
2148                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
2149                                             BNXT_USE_CHIMP_MB);
2150
2151                 HWRM_CHECK_RESULT();
2152                 HWRM_UNLOCK();
2153         }
2154
2155         return rc;
2156 }
2157
2158 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
2159                            struct bnxt_vnic_info *vnic)
2160 {
2161         int rc = 0;
2162         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2163         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2164
2165         if (!vnic->rss_table)
2166                 return 0;
2167
2168         if (BNXT_CHIP_THOR(bp))
2169                 return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);
2170
2171         HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2172
2173         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
2174         req.hash_mode_flags = vnic->hash_mode;
2175
2176         req.ring_grp_tbl_addr =
2177             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
2178         req.hash_key_tbl_addr =
2179             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2180         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
2181         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2182
2183         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2184
2185         HWRM_CHECK_RESULT();
2186         HWRM_UNLOCK();
2187
2188         return rc;
2189 }
2190
2191 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
2192                         struct bnxt_vnic_info *vnic)
2193 {
2194         int rc = 0;
2195         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
2196         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2197         uint16_t size;
2198
2199         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2200                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
2201                 return rc;
2202         }
2203
2204         HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
2205
2206         req.flags = rte_cpu_to_le_32(
2207                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
2208
2209         req.enables = rte_cpu_to_le_32(
2210                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
2211
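        /* Jumbo threshold: usable RX buffer size, capped at the
         * maximum supported packet length.
         */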
2212         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
2213         size -= RTE_PKTMBUF_HEADROOM;
2214         size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
2215
2216         req.jumbo_thresh = rte_cpu_to_le_16(size);
2217         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2218
2219         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2220
2221         HWRM_CHECK_RESULT();
2222         HWRM_UNLOCK();
2223
2224         return rc;
2225 }
2226
2227 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
2228                         struct bnxt_vnic_info *vnic, bool enable)
2229 {
2230         int rc = 0;
2231         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
2232         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2233
2234         if (BNXT_CHIP_THOR(bp) && !bp->max_tpa_v2) {
2235                 if (enable)
2236                         PMD_DRV_LOG(ERR, "No HW support for LRO\n");
2237                 return -ENOTSUP;
2238         }
2239
2240         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2241                 PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
2242                 return 0;
2243         }
2244
2245         HWRM_PREP(&req, HWRM_VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
2246
2247         if (enable) {
2248                 req.enables = rte_cpu_to_le_32(
2249                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
2250                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
2251                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
2252                 req.flags = rte_cpu_to_le_32(
2253                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
2254                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
2255                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
2256                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
2257                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
2258                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
2259                 req.max_agg_segs = rte_cpu_to_le_16(BNXT_TPA_MAX_AGGS(bp));
2260                 req.max_aggs = rte_cpu_to_le_16(BNXT_TPA_MAX_SEGS(bp));
2261                 req.min_agg_len = rte_cpu_to_le_32(512);
2262         }
2263         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2264
2265         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2266
2267         HWRM_CHECK_RESULT();
2268         HWRM_UNLOCK();
2269
2270         return rc;
2271 }
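/*
 * Illustrative sketch (hypothetical helper, not called by the driver):
 * toggle LRO/TPA, assuming vnic_info[0] is the default VNIC as set up
 * by this PMD.
 */
static int __rte_unused
bnxt_example_set_lro(struct bnxt *bp, bool on)
{
        return bnxt_hwrm_vnic_tpa_cfg(bp, &bp->vnic_info[0], on);
}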
2272
2273 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
2274 {
2275         struct hwrm_func_cfg_input req = {0};
2276         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2277         int rc;
2278
2279         req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
2280         req.enables = rte_cpu_to_le_32(
2281                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2282         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
2283         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
2284
2285         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
2286
2287         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2288         HWRM_CHECK_RESULT();
2289         HWRM_UNLOCK();
2290
2291         bp->pf->vf_info[vf].random_mac = false;
2292
2293         return rc;
2294 }
2295
2296 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
2297                                   uint64_t *dropped)
2298 {
2299         int rc = 0;
2300         struct hwrm_func_qstats_input req = {.req_type = 0};
2301         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2302
2303         HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2304
2305         req.fid = rte_cpu_to_le_16(fid);
2306
2307         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2308
2309         HWRM_CHECK_RESULT();
2310
2311         if (dropped)
2312                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
2313
2314         HWRM_UNLOCK();
2315
2316         return rc;
2317 }
2318
2319 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
2320                           struct rte_eth_stats *stats,
2321                           struct hwrm_func_qstats_output *func_qstats)
2322 {
2323         int rc = 0;
2324         struct hwrm_func_qstats_input req = {.req_type = 0};
2325         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2326
2327         HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2328
2329         req.fid = rte_cpu_to_le_16(fid);
2330
2331         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2332
2333         HWRM_CHECK_RESULT();
2334         if (func_qstats)
2335                 memcpy(func_qstats, resp,
2336                        sizeof(struct hwrm_func_qstats_output));
2337
2338         if (!stats)
2339                 goto exit;
2340
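        /* Fold unicast, multicast and broadcast counters into the
         * aggregate rte_eth_stats fields.
         */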
2341         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2342         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2343         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2344         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2345         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2346         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2347
2348         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2349         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2350         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2351         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2352         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2353         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2354
2355         stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
2356         stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
2357         stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
2358
2359 exit:
2360         HWRM_UNLOCK();
2361
2362         return rc;
2363 }
2364
2365 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
2366 {
2367         int rc = 0;
2368         struct hwrm_func_clr_stats_input req = {.req_type = 0};
2369         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2370
2371         HWRM_PREP(&req, HWRM_FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
2372
2373         req.fid = rte_cpu_to_le_16(fid);
2374
2375         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2376
2377         HWRM_CHECK_RESULT();
2378         HWRM_UNLOCK();
2379
2380         return rc;
2381 }
2382
2383 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
2384 {
2385         unsigned int i;
2386         int rc = 0;
2387
2388         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2389                 struct bnxt_tx_queue *txq;
2390                 struct bnxt_rx_queue *rxq;
2391                 struct bnxt_cp_ring_info *cpr;
2392
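                /* Completion rings are indexed RX-first, then TX. */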
2393                 if (i >= bp->rx_cp_nr_rings) {
2394                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2395                         cpr = txq->cp_ring;
2396                 } else {
2397                         rxq = bp->rx_queues[i];
2398                         cpr = rxq->cp_ring;
2399                 }
2400
2401                 rc = bnxt_hwrm_stat_clear(bp, cpr);
2402                 if (rc)
2403                         return rc;
2404         }
2405         return 0;
2406 }
2407
2408 static int
2409 bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
2410 {
2411         int rc;
2412         unsigned int i;
2413         struct bnxt_cp_ring_info *cpr;
2414
2415         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2416
2417                 if (i >= bp->rx_cp_nr_rings) {
2418                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
2419                 } else {
2420                         cpr = bp->rx_queues[i]->cp_ring;
2421                         if (BNXT_HAS_RING_GRPS(bp))
2422                                 bp->grp_info[i].fw_stats_ctx = -1;
2423                 }
2424                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
2425                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
2426                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
2427                         if (rc)
2428                                 return rc;
2429                 }
2430         }
2431         return 0;
2432 }
2433
2434 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
2435 {
2436         unsigned int i;
2437         int rc = 0;
2438
2439         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2440                 struct bnxt_tx_queue *txq;
2441                 struct bnxt_rx_queue *rxq;
2442                 struct bnxt_cp_ring_info *cpr;
2443
2444                 if (i >= bp->rx_cp_nr_rings) {
2445                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2446                         cpr = txq->cp_ring;
2447                 } else {
2448                         rxq = bp->rx_queues[i];
2449                         cpr = rxq->cp_ring;
2450                 }
2451
2452                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
2453
2454                 if (rc)
2455                         return rc;
2456         }
2457         return rc;
2458 }
2459
2460 static int
2461 bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
2462 {
2463         uint16_t idx;
2464         int rc = 0;
2465
2466         if (!BNXT_HAS_RING_GRPS(bp))
2467                 return 0;
2468
2469         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
2470
2471                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
2472                         continue;
2473
2474                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
2475
2476                 if (rc)
2477                         return rc;
2478         }
2479         return rc;
2480 }
2481
2482 void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2483 {
2484         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2485
2486         bnxt_hwrm_ring_free(bp, cp_ring,
2487                             HWRM_RING_FREE_INPUT_RING_TYPE_NQ);
2488         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2489         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2490                                      sizeof(*cpr->cp_desc_ring));
2491         cpr->cp_raw_cons = 0;
2492         cpr->valid = 0;
2493 }
2494
2495 void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2496 {
2497         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2498
2499         bnxt_hwrm_ring_free(bp, cp_ring,
2500                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
2501         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2502         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2503                         sizeof(*cpr->cp_desc_ring));
2504         cpr->cp_raw_cons = 0;
2505         cpr->valid = 0;
2506 }
2507
2508 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
2509 {
2510         struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
2511         struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
2512         struct bnxt_ring *ring = rxr->rx_ring_struct;
2513         struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
2514
2515         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2516                 bnxt_hwrm_ring_free(bp, ring,
2517                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2518                 ring->fw_ring_id = INVALID_HW_RING_ID;
2519                 if (BNXT_HAS_RING_GRPS(bp))
2520                         bp->grp_info[queue_index].rx_fw_ring_id =
2521                                                         INVALID_HW_RING_ID;
2522         }
2523         ring = rxr->ag_ring_struct;
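        /* Thor uses a dedicated aggregation ring type; older chips
         * free the aggregation ring as a regular RX ring.
         */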
2524         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2525                 bnxt_hwrm_ring_free(bp, ring,
2526                                     BNXT_CHIP_THOR(bp) ?
2527                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
2528                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2529                 if (BNXT_HAS_RING_GRPS(bp))
2530                         bp->grp_info[queue_index].ag_fw_ring_id =
2531                                                         INVALID_HW_RING_ID;
2532         }
2533         if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
2534                 bnxt_free_cp_ring(bp, cpr);
2535
2536         if (BNXT_HAS_RING_GRPS(bp))
2537                 bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
2538 }
2539
2540 static int
2541 bnxt_free_all_hwrm_rings(struct bnxt *bp)
2542 {
2543         unsigned int i;
2544
2545         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
2546                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
2547                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
2548                 struct bnxt_ring *ring = txr->tx_ring_struct;
2549                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
2550
2551                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2552                         bnxt_hwrm_ring_free(bp, ring,
2553                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
2554                         ring->fw_ring_id = INVALID_HW_RING_ID;
2555                         memset(txr->tx_desc_ring, 0,
2556                                         txr->tx_ring_struct->ring_size *
2557                                         sizeof(*txr->tx_desc_ring));
2558                         memset(txr->tx_buf_ring, 0,
2559                                         txr->tx_ring_struct->ring_size *
2560                                         sizeof(*txr->tx_buf_ring));
2561                         txr->tx_prod = 0;
2562                         txr->tx_cons = 0;
2563                 }
2564                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
2565                         bnxt_free_cp_ring(bp, cpr);
2566                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
2567                 }
2568         }
2569
2570         for (i = 0; i < bp->rx_cp_nr_rings; i++)
2571                 bnxt_free_hwrm_rx_ring(bp, i);
2572
2573         return 0;
2574 }
2575
2576 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
2577 {
2578         uint16_t i;
2579         int rc = 0;
2580
2581         if (!BNXT_HAS_RING_GRPS(bp))
2582                 return 0;
2583
2584         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
2585                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
2586                 if (rc)
2587                         return rc;
2588         }
2589         return rc;
2590 }
2591
2592 /*
2593  * HWRM utility functions
2594  */
2595
2596 void bnxt_free_hwrm_resources(struct bnxt *bp)
2597 {
2598         /* Release buffers allocated via rte_malloc() */
2599         rte_free(bp->hwrm_cmd_resp_addr);
2600         rte_free(bp->hwrm_short_cmd_req_addr);
2601         bp->hwrm_cmd_resp_addr = NULL;
2602         bp->hwrm_short_cmd_req_addr = NULL;
2603         bp->hwrm_cmd_resp_dma_addr = 0;
2604         bp->hwrm_short_cmd_req_dma_addr = 0;
2605 }
2606
2607 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2608 {
2609         struct rte_pci_device *pdev = bp->pdev;
2610         char type[RTE_MEMZONE_NAMESIZE];
2611
2612         sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT, pdev->addr.domain,
2613                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2614         bp->max_resp_len = HWRM_MAX_RESP_LEN;
2615         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
2616         if (bp->hwrm_cmd_resp_addr == NULL)
2617                 return -ENOMEM;
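        /* Firmware DMAs command responses into this buffer, so record
         * its IOVA for use in request headers.
         */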
2618         bp->hwrm_cmd_resp_dma_addr =
2619                 rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
2620         if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
2621                 PMD_DRV_LOG(ERR,
2622                         "unable to map response address to physical memory\n");
2623                 return -ENOMEM;
2624         }
2625         rte_spinlock_init(&bp->hwrm_lock);
2626
2627         return 0;
2628 }
2629
2630 int
2631 bnxt_clear_one_vnic_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
2632 {
2633         int rc = 0;
2634
2635         if (filter->filter_type == HWRM_CFA_EM_FILTER) {
2636                 rc = bnxt_hwrm_clear_em_filter(bp, filter);
2637                 if (rc)
2638                         return rc;
2639         } else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
2640                 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2641                 if (rc)
2642                         return rc;
2643         }
2644
2645         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2646         return rc;
2647 }
2648
2649 static int
2650 bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2651 {
2652         struct bnxt_filter_info *filter;
2653         int rc = 0;
2654
2655         STAILQ_FOREACH(filter, &vnic->filter, next) {
2656                 rc = bnxt_clear_one_vnic_filter(bp, filter);
2657                 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
2658                 bnxt_free_filter(bp, filter);
2659         }
2660         return rc;
2661 }
2662
2663 static int
2664 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2665 {
2666         struct bnxt_filter_info *filter;
2667         struct rte_flow *flow;
2668         int rc = 0;
2669
2670         while (!STAILQ_EMPTY(&vnic->flow_list)) {
2671                 flow = STAILQ_FIRST(&vnic->flow_list);
2672                 filter = flow->filter;
2673                 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
2674                 rc = bnxt_clear_one_vnic_filter(bp, filter);
2675
2676                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2677                 rte_free(flow);
2678         }
2679         return rc;
2680 }
2681
2682 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2683 {
2684         struct bnxt_filter_info *filter;
2685         int rc = 0;
2686
2687         STAILQ_FOREACH(filter, &vnic->filter, next) {
2688                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2689                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2690                                                      filter);
2691                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2692                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2693                                                          filter);
2694                 else
2695                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2696                                                      filter);
2697                 if (rc)
2698                         break;
2699         }
2700         return rc;
2701 }
2702
2703 static void
2704 bnxt_free_tunnel_ports(struct bnxt *bp)
2705 {
2706         if (bp->vxlan_port_cnt)
2707                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2708                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2709         bp->vxlan_port = 0;
2710         if (bp->geneve_port_cnt)
2711                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2712                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2713         bp->geneve_port = 0;
2714 }
2715
2716 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2717 {
2718         int i;
2719
2720         if (bp->vnic_info == NULL)
2721                 return;
2722
2723         /*
2724          * Cleanup VNICs in reverse order, to make sure the L2 filter
2725          * from vnic0 is last to be cleaned up.
2726          */
2727         for (i = bp->max_vnics - 1; i >= 0; i--) {
2728                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2729
2730                 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
2731                         continue;
2732
2733                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2734
2735                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2736
2737                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2738
2739                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2740
2741                 bnxt_hwrm_vnic_free(bp, vnic);
2742
2743                 rte_free(vnic->fw_grp_ids);
2744         }
2745         /* Ring resources */
2746         bnxt_free_all_hwrm_rings(bp);
2747         bnxt_free_all_hwrm_ring_grps(bp);
2748         bnxt_free_all_hwrm_stat_ctxs(bp);
2749         bnxt_free_tunnel_ports(bp);
2750 }
2751
2752 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2753 {
2754         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2755
2756         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2757                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2758
2759         switch (conf_link_speed) {
2760         case ETH_LINK_SPEED_10M_HD:
2761         case ETH_LINK_SPEED_100M_HD:
2762                 /* FALLTHROUGH */
2763                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2764         }
2765         return hw_link_duplex;
2766 }
2767
2768 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2769 {
2770         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2771 }
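
/*
 * Worked example (illustrative): dev_conf.link_speeds is a bitmap in
 * which the ETH_LINK_SPEED_FIXED bit selects a forced speed and its
 * absence selects autonegotiation:
 *
 *	link_speeds = ETH_LINK_SPEED_AUTONEG;	autoneg, any speed
 *	link_speeds = ETH_LINK_SPEED_25G;	autoneg, advertise 25G only
 *	link_speeds = ETH_LINK_SPEED_FIXED |
 *		      ETH_LINK_SPEED_25G;	forced 25G, no autoneg
 */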
2772
2773 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2774 {
2775         uint16_t eth_link_speed = 0;
2776
2777         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2778                 return ETH_LINK_SPEED_AUTONEG;
2779
2780         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2781         case ETH_LINK_SPEED_100M:
2782         case ETH_LINK_SPEED_100M_HD:
2783                 /* FALLTHROUGH */
2784                 eth_link_speed =
2785                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2786                 break;
2787         case ETH_LINK_SPEED_1G:
2788                 eth_link_speed =
2789                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2790                 break;
2791         case ETH_LINK_SPEED_2_5G:
2792                 eth_link_speed =
2793                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2794                 break;
2795         case ETH_LINK_SPEED_10G:
2796                 eth_link_speed =
2797                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2798                 break;
2799         case ETH_LINK_SPEED_20G:
2800                 eth_link_speed =
2801                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2802                 break;
2803         case ETH_LINK_SPEED_25G:
2804                 eth_link_speed =
2805                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2806                 break;
2807         case ETH_LINK_SPEED_40G:
2808                 eth_link_speed =
2809                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2810                 break;
2811         case ETH_LINK_SPEED_50G:
2812                 eth_link_speed =
2813                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2814                 break;
2815         case ETH_LINK_SPEED_100G:
2816                 eth_link_speed =
2817                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2818                 break;
2819         case ETH_LINK_SPEED_200G:
2820                 eth_link_speed =
2821                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_200GB;
2822                 break;
2823         default:
2824                 PMD_DRV_LOG(ERR,
2825                         "Unsupported link speed %u; default to AUTO\n",
2826                         conf_link_speed);
2827                 break;
2828         }
2829         return eth_link_speed;
2830 }
2831
2832 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2833                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2834                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2835                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \
2836                 ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G)
2837
2838 static int bnxt_validate_link_speed(struct bnxt *bp)
2839 {
2840         uint32_t link_speed = bp->eth_dev->data->dev_conf.link_speeds;
2841         uint16_t port_id = bp->eth_dev->data->port_id;
2842         uint32_t link_speed_capa;
2843         uint32_t one_speed;
2844
2845         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2846                 return 0;
2847
2848         link_speed_capa = bnxt_get_speed_capabilities(bp);
2849
2850         if (link_speed & ETH_LINK_SPEED_FIXED) {
2851                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2852
2853                 if (one_speed & (one_speed - 1)) {
2854                         PMD_DRV_LOG(ERR,
2855                                 "Invalid advertised speeds (%u) for port %u\n",
2856                                 link_speed, port_id);
2857                         return -EINVAL;
2858                 }
2859                 if ((one_speed & link_speed_capa) != one_speed) {
2860                         PMD_DRV_LOG(ERR,
2861                                 "Unsupported advertised speed (%u) for port %u\n",
2862                                 link_speed, port_id);
2863                         return -EINVAL;
2864                 }
2865         } else {
2866                 if (!(link_speed & link_speed_capa)) {
2867                         PMD_DRV_LOG(ERR,
2868                                 "Unsupported advertised speeds (%u) for port %u\n",
2869                                 link_speed, port_id);
2870                         return -EINVAL;
2871                 }
2872         }
2873         return 0;
2874 }
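
/*
 * Worked example (illustrative): "one_speed & (one_speed - 1)" is the
 * standard single-bit (power-of-two) test. With one_speed = 0x10 the
 * check is 0x10 & 0x0f = 0, a valid single fixed speed; with
 * one_speed = 0x18 it is 0x18 & 0x17 = 0x10 != 0, i.e. two speed bits
 * were requested together with ETH_LINK_SPEED_FIXED, which is rejected.
 */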
2875
2876 static uint16_t
2877 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2878 {
2879         uint16_t ret = 0;
2880
2881         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2882                 if (bp->link_info->support_speeds)
2883                         return bp->link_info->support_speeds;
2884                 link_speed = BNXT_SUPPORTED_SPEEDS;
2885         }
2886
2887         if (link_speed & ETH_LINK_SPEED_100M)
2888                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2889         if (link_speed & ETH_LINK_SPEED_100M_HD)
2890                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2891         if (link_speed & ETH_LINK_SPEED_1G)
2892                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2893         if (link_speed & ETH_LINK_SPEED_2_5G)
2894                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2895         if (link_speed & ETH_LINK_SPEED_10G)
2896                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2897         if (link_speed & ETH_LINK_SPEED_20G)
2898                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2899         if (link_speed & ETH_LINK_SPEED_25G)
2900                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2901         if (link_speed & ETH_LINK_SPEED_40G)
2902                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2903         if (link_speed & ETH_LINK_SPEED_50G)
2904                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2905         if (link_speed & ETH_LINK_SPEED_100G)
2906                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2907         if (link_speed & ETH_LINK_SPEED_200G)
2908                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_200GB;
2909         return ret;
2910 }
2911
2912 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2913 {
2914         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2915
2916         switch (hw_link_speed) {
2917         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2918                 eth_link_speed = ETH_SPEED_NUM_100M;
2919                 break;
2920         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2921                 eth_link_speed = ETH_SPEED_NUM_1G;
2922                 break;
2923         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2924                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2925                 break;
2926         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2927                 eth_link_speed = ETH_SPEED_NUM_10G;
2928                 break;
2929         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2930                 eth_link_speed = ETH_SPEED_NUM_20G;
2931                 break;
2932         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2933                 eth_link_speed = ETH_SPEED_NUM_25G;
2934                 break;
2935         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2936                 eth_link_speed = ETH_SPEED_NUM_40G;
2937                 break;
2938         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2939                 eth_link_speed = ETH_SPEED_NUM_50G;
2940                 break;
2941         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2942                 eth_link_speed = ETH_SPEED_NUM_100G;
2943                 break;
2944         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
2945                 eth_link_speed = ETH_SPEED_NUM_200G;
2946                 break;
2947         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2948         default:
2949                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2950                         hw_link_speed);
2951                 break;
2952         }
2953         return eth_link_speed;
2954 }
2955
2956 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2957 {
2958         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2959
2960         switch (hw_link_duplex) {
2961         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2962         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2963                 /* FALLTHROUGH */
2964                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2965                 break;
2966         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2967                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2968                 break;
2969         default:
2970                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2971                         hw_link_duplex);
2972                 break;
2973         }
2974         return eth_link_duplex;
2975 }
2976
2977 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2978 {
2979         int rc = 0;
2980         struct bnxt_link_info *link_info = bp->link_info;
2981
2982         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2983         if (rc) {
2984                 PMD_DRV_LOG(ERR,
2985                         "Get link config failed with rc %d\n", rc);
2986                 goto exit;
2987         }
2988         if (link_info->link_speed)
2989                 link->link_speed =
2990                         bnxt_parse_hw_link_speed(link_info->link_speed);
2991         else
2992                 link->link_speed = ETH_SPEED_NUM_NONE;
2993         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2994         link->link_status = link_info->link_up;
2995         link->link_autoneg = link_info->auto_mode ==
2996                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2997                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2998 exit:
2999         return rc;
3000 }
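
/*
 * Sketch (illustrative, hypothetical wrapper; the PMD's real link_update
 * callback lives in its ethdev code): refreshing and publishing the link
 * through this helper would look roughly like
 *
 *	struct rte_eth_link new;
 *
 *	memset(&new, 0, sizeof(new));
 *	rc = bnxt_get_hwrm_link_config(bp, &new);
 *	if (rc == 0)
 *		rte_eth_linkstatus_set(eth_dev, &new);
 */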
3001
3002 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
3003 {
3004         int rc = 0;
3005         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
3006         struct bnxt_link_info link_req;
3007         uint16_t speed, autoneg;
3008
3009         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
3010                 return 0;
3011
3012         rc = bnxt_validate_link_speed(bp);
3013         if (rc)
3014                 goto error;
3015
3016         memset(&link_req, 0, sizeof(link_req));
3017         link_req.link_up = link_up;
3018         if (!link_up)
3019                 goto port_phy_cfg;
3020
3021         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
3022         if (BNXT_CHIP_THOR(bp) &&
3023             dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
3024                 /* 40G is not supported as part of media auto detect.
3025                  * The speed should be forced and autoneg disabled
3026                  * to configure 40G speed.
3027                  */
3028                 PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n");
3029                 autoneg = 0;
3030         }
3031
3032         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
3033         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
3034         /* Autoneg can be used only when the firmware allows it.
3035          * When the user configures a fixed speed of 40G and later changes
3036          * to any other speed, auto_link_speed/force_link_speed remains
3037          * set to 40G until the link comes up at the new speed.
3038          */
3039         if (autoneg == 1 &&
3040             !(!BNXT_CHIP_THOR(bp) &&
3041               (bp->link_info->auto_link_speed ||
3042                bp->link_info->force_link_speed))) {
3043                 link_req.phy_flags |=
3044                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
3045                 link_req.auto_link_speed_mask =
3046                         bnxt_parse_eth_link_speed_mask(bp,
3047                                                        dev_conf->link_speeds);
3048         } else {
3049                 if (bp->link_info->phy_type ==
3050                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
3051                     bp->link_info->phy_type ==
3052                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
3053                     bp->link_info->media_type ==
3054                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
3055                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
3056                         return -EINVAL;
3057                 }
3058
3059                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
3060                 /* If the user wants a particular speed, try that first. */
3061                 if (speed)
3062                         link_req.link_speed = speed;
3063                 else if (bp->link_info->force_link_speed)
3064                         link_req.link_speed = bp->link_info->force_link_speed;
3065                 else
3066                         link_req.link_speed = bp->link_info->auto_link_speed;
3067         }
3068         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
3069         link_req.auto_pause = bp->link_info->auto_pause;
3070         link_req.force_pause = bp->link_info->force_pause;
3071
3072 port_phy_cfg:
3073         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
3074         if (rc) {
3075                 PMD_DRV_LOG(ERR,
3076                         "Set link config failed with rc %d\n", rc);
3077         }
3078
3079 error:
3080         return rc;
3081 }
3082
3083 /* JIRA 22088 */
3084 int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
3085 {
3086         struct hwrm_func_qcfg_input req = {0};
3087         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3088         uint16_t flags;
3089         uint16_t svif_info;
3090         int rc = 0;
3091         bp->func_svif = BNXT_SVIF_INVALID;
3092
3093         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3094         req.fid = rte_cpu_to_le_16(0xffff);
3095
3096         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3097
3098         HWRM_CHECK_RESULT();
3099
3100         /* Hard-coded 0xfff VLAN ID mask */
3101         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
3102
3103         svif_info = rte_le_to_cpu_16(resp->svif_info);
3104         if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
3105                 bp->func_svif = svif_info &
3106                                      HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
3107
3108         flags = rte_le_to_cpu_16(resp->flags);
3109         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
3110                 bp->flags |= BNXT_FLAG_MULTI_HOST;
3111
3112         if (BNXT_VF(bp) &&
3113             !BNXT_VF_IS_TRUSTED(bp) &&
3114             (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3115                 bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
3116                 PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
3117         } else if (BNXT_VF(bp) &&
3118                    BNXT_VF_IS_TRUSTED(bp) &&
3119                    !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3120                 bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
3121                 PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
3122         }
3123
3124         if (mtu)
3125                 *mtu = rte_le_to_cpu_16(resp->mtu);
3126
3127         switch (resp->port_partition_type) {
3128         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
3129         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
3130         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
3131                 /* FALLTHROUGH */
3132                 bp->flags |= BNXT_FLAG_NPAR_PF;
3133                 break;
3134         default:
3135                 bp->flags &= ~BNXT_FLAG_NPAR_PF;
3136                 break;
3137         }
3138
3139         HWRM_UNLOCK();
3140
3141         return rc;
3142 }
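
/*
 * Illustrative note: bnxt_hwrm_func_qcfg() above follows the canonical
 * HWRM call shape used throughout this file:
 *
 *	HWRM_PREP(&req, <TYPE>, BNXT_USE_CHIMP_MB);	take hwrm_lock, fill hdr
 *	...fill request fields (little-endian)...
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 *	HWRM_CHECK_RESULT();	on error: unlock, return negative errno
 *	...read resp fields...
 *	HWRM_UNLOCK();
 *
 * HWRM_PREP() acquires bp->hwrm_lock, so every exit path between PREP and
 * UNLOCK must go through HWRM_CHECK_RESULT()/HWRM_UNLOCK() to release it.
 */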
3143
3144 int bnxt_hwrm_parent_pf_qcfg(struct bnxt *bp)
3145 {
3146         struct hwrm_func_qcfg_input req = {0};
3147         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3148         int rc;
3149
3150         if (!BNXT_VF_IS_TRUSTED(bp))
3151                 return 0;
3152
3153         if (!bp->parent)
3154                 return -EINVAL;
3155
3156         bp->parent->fid = BNXT_PF_FID_INVALID;
3157
3158         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3159
3160         req.fid = rte_cpu_to_le_16(0xfffe); /* Request parent PF information. */
3161
3162         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3163
3164         HWRM_CHECK_RESULT();
3165
3166         memcpy(bp->parent->mac_addr, resp->mac_address, RTE_ETHER_ADDR_LEN);
3167         bp->parent->vnic = rte_le_to_cpu_16(resp->dflt_vnic_id);
3168         bp->parent->fid = rte_le_to_cpu_16(resp->fid);
3169         bp->parent->port_id = rte_le_to_cpu_16(resp->port_id);
3170
3171         /* FIXME: Temporary workaround - remove when firmware issue is fixed. */
3172         if (bp->parent->vnic == 0) {
3173                 PMD_DRV_LOG(ERR, "Error: parent VNIC unavailable.\n");
3174                 /* Use hard-coded values appropriate for current Wh+ fw. */
3175                 if (bp->parent->fid == 2)
3176                         bp->parent->vnic = 0x100;
3177                 else
3178                         bp->parent->vnic = 1;
3179         }
3180
3181         HWRM_UNLOCK();
3182
3183         return 0;
3184 }
3185
3186 int bnxt_hwrm_get_dflt_vnic_svif(struct bnxt *bp, uint16_t fid,
3187                                  uint16_t *vnic_id, uint16_t *svif)
3188 {
3189         struct hwrm_func_qcfg_input req = {0};
3190         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3191         uint16_t svif_info;
3192         int rc = 0;
3193
3194         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3195         req.fid = rte_cpu_to_le_16(fid);
3196
3197         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3198
3199         HWRM_CHECK_RESULT();
3200
3201         if (vnic_id)
3202                 *vnic_id = rte_le_to_cpu_16(resp->dflt_vnic_id);
3203
3204         svif_info = rte_le_to_cpu_16(resp->svif_info);
3205         if (svif && (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID))
3206                 *svif = svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
3207
3208         HWRM_UNLOCK();
3209
3210         return rc;
3211 }
3212
3213 int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp)
3214 {
3215         struct hwrm_port_mac_qcfg_input req = {0};
3216         struct hwrm_port_mac_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3217         uint16_t port_svif_info;
3218         int rc;
3219
3220         bp->port_svif = BNXT_SVIF_INVALID;
3221
3222         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
3223                 return 0;
3224
3225         HWRM_PREP(&req, HWRM_PORT_MAC_QCFG, BNXT_USE_CHIMP_MB);
3226
3227         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3228
3229         HWRM_CHECK_RESULT_SILENT();
3230
3231         port_svif_info = rte_le_to_cpu_16(resp->port_svif_info);
3232         if (port_svif_info &
3233             HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_VALID)
3234                 bp->port_svif = port_svif_info &
3235                         HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_MASK;
3236
3237         HWRM_UNLOCK();
3238
3239         return 0;
3240 }
3241
3242 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
3243                                    struct hwrm_func_qcaps_output *qcaps)
3244 {
3245         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
3246         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
3247                sizeof(qcaps->mac_address));
3248         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
3249         qcaps->max_rx_rings = fcfg->num_rx_rings;
3250         qcaps->max_tx_rings = fcfg->num_tx_rings;
3251         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
3252         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
3253         qcaps->max_vfs = 0;
3254         qcaps->first_vf_id = 0;
3255         qcaps->max_vnics = fcfg->num_vnics;
3256         qcaps->max_decap_records = 0;
3257         qcaps->max_encap_records = 0;
3258         qcaps->max_tx_wm_flows = 0;
3259         qcaps->max_tx_em_flows = 0;
3260         qcaps->max_rx_wm_flows = 0;
3261         qcaps->max_rx_em_flows = 0;
3262         qcaps->max_flow_id = 0;
3263         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
3264         qcaps->max_sp_tx_rings = 0;
3265         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
3266 }
3267
3268 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
3269 {
3270         struct hwrm_func_cfg_input req = {0};
3271         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3272         uint32_t enables;
3273         int rc;
3274
3275         enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
3276                   HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3277                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3278                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3279                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3280                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3281                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3282                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3283                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;
3284
3285         if (BNXT_HAS_RING_GRPS(bp)) {
3286                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
3287                 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
3288         } else if (BNXT_HAS_NQ(bp)) {
3289                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
3290                 req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
3291         }
3292
3293         req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
3294         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
3295         req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3296         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
3297         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
3298         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
3299         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
3300         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
3301         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
3302         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
3303         req.fid = rte_cpu_to_le_16(0xffff);
3304         req.enables = rte_cpu_to_le_32(enables);
3305
3306         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3307
3308         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3309
3310         HWRM_CHECK_RESULT();
3311         HWRM_UNLOCK();
3312
3313         return rc;
3314 }
3315
3316 static void populate_vf_func_cfg_req(struct bnxt *bp,
3317                                      struct hwrm_func_cfg_input *req,
3318                                      int num_vfs)
3319 {
3320         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
3321                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3322                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3323                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3324                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3325                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3326                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3327                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3328                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
3329                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
3330
3331         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
3332                                     RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
3333                                     BNXT_NUM_VLANS);
3334         req->mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3335         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
3336                                                 (num_vfs + 1));
3337         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
3338         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
3339                                                (num_vfs + 1));
3340         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
3341         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
3342         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
3343         /* TODO: For now, do not support VMDq/RFS on VFs. */
3344         req->num_vnics = rte_cpu_to_le_16(1);
3345         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
3346                                                  (num_vfs + 1));
3347 }
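
/*
 * Worked example (illustrative): resources are split evenly between the
 * PF and its VFs. With bp->max_tx_rings = 16 and num_vfs = 3, each of
 * the four functions (PF + 3 VFs) is offered 16 / (3 + 1) = 4 TX rings;
 * the same divisor applies to completion rings, L2 contexts, and so on.
 */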
3348
3349 static void add_random_mac_if_needed(struct bnxt *bp,
3350                                      struct hwrm_func_cfg_input *cfg_req,
3351                                      int vf)
3352 {
3353         struct rte_ether_addr mac;
3354
3355         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
3356                 return;
3357
3358         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
3359                 cfg_req->enables |=
3360                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3361                 rte_eth_random_addr(cfg_req->dflt_mac_addr);
3362                 bp->pf->vf_info[vf].random_mac = true;
3363         } else {
3364                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,
3365                         RTE_ETHER_ADDR_LEN);
3366         }
3367 }
3368
3369 static int reserve_resources_from_vf(struct bnxt *bp,
3370                                      struct hwrm_func_cfg_input *cfg_req,
3371                                      int vf)
3372 {
3373         struct hwrm_func_qcaps_input req = {0};
3374         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3375         int rc;
3376
3377         /* Get the actual allocated values now */
3378         HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
3379         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3380         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3381
3382         if (rc) {
3383                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
3384                 copy_func_cfg_to_qcaps(cfg_req, resp);
3385         } else if (resp->error_code) {
3386                 rc = rte_le_to_cpu_16(resp->error_code);
3387                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
3388                 copy_func_cfg_to_qcaps(cfg_req, resp);
3389         }
3390
3391         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
3392         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
3393         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
3394         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
3395         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
3396         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
3397         /*
3398          * TODO: VMDq is not supported with VFs, so max_vnics is always
3399          * forced to 1; no VNICs are reserved from the VF here.
3400          */
3401         //bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
3402         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
3403
3404         HWRM_UNLOCK();
3405
3406         return 0;
3407 }
3408
3409 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
3410 {
3411         struct hwrm_func_qcfg_input req = {0};
3412         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3413         int rc;
3414
3415         /* Read the VF's currently configured default VLAN. */
3416         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3417         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3418         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3419         HWRM_CHECK_RESULT();
3420         rc = rte_le_to_cpu_16(resp->vlan);
3421
3422         HWRM_UNLOCK();
3423
3424         return rc;
3425 }
3426
3427 static int update_pf_resource_max(struct bnxt *bp)
3428 {
3429         struct hwrm_func_qcfg_input req = {0};
3430         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3431         int rc;
3432
3433         /* And copy the allocated numbers into the pf struct */
3434         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3435         req.fid = rte_cpu_to_le_16(0xffff);
3436         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3437         HWRM_CHECK_RESULT();
3438
3439         /* Only TX ring value reflects actual allocation? TODO */
3440         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
3441         bp->pf->evb_mode = resp->evb_mode;
3442
3443         HWRM_UNLOCK();
3444
3445         return rc;
3446 }
3447
3448 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
3449 {
3450         int rc;
3451
3452         if (!BNXT_PF(bp)) {
3453                 PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
3454                 return -EINVAL;
3455         }
3456
3457         rc = bnxt_hwrm_func_qcaps(bp);
3458         if (rc)
3459                 return rc;
3460
3461         bp->pf->func_cfg_flags &=
3462                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3463                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3464         bp->pf->func_cfg_flags |=
3465                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
3466         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
3467         rc = __bnxt_hwrm_func_qcaps(bp);
3468         return rc;
3469 }
3470
3471 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
3472 {
3473         struct hwrm_func_cfg_input req = {0};
3474         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3475         int i;
3476         size_t sz;
3477         int rc = 0;
3478         size_t req_buf_sz;
3479
3480         if (!BNXT_PF(bp)) {
3481                 PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
3482                 return -EINVAL;
3483         }
3484
3485         rc = bnxt_hwrm_func_qcaps(bp);
3486
3487         if (rc)
3488                 return rc;
3489
3490         bp->pf->active_vfs = num_vfs;
3491
3492         /*
3493          * First, configure the PF to only use one TX ring.  This ensures that
3494          * there are enough rings for all VFs.
3495          *
3496          * If we don't do this, when we call func_alloc() later, we will lock
3497          * extra rings to the PF that won't be available during func_cfg() of
3498          * the VFs.
3499          *
3500          * This has been fixed with firmware versions above 20.6.54.
3501          */
3502         bp->pf->func_cfg_flags &=
3503                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3504                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3505         bp->pf->func_cfg_flags |=
3506                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
3507         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
3508         if (rc)
3509                 return rc;
3510
3511         /*
3512          * Now, create and register a buffer to hold forwarded VF requests
3513          */
3514         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
3515         bp->pf->vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
3516                 page_roundup(req_buf_sz));
3517         if (bp->pf->vf_req_buf == NULL) {
3518                 rc = -ENOMEM;
3519                 goto error_free;
3520         }
3521         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
3522                 rte_mem_lock_page(((char *)bp->pf->vf_req_buf) + sz);
3523         for (i = 0; i < num_vfs; i++)
3524                 bp->pf->vf_info[i].req_buf = ((char *)bp->pf->vf_req_buf) +
3525                                         (i * HWRM_MAX_REQ_LEN);
3526
3527         rc = bnxt_hwrm_func_buf_rgtr(bp);
3528         if (rc)
3529                 goto error_free;
3530
3531         populate_vf_func_cfg_req(bp, &req, num_vfs);
3532
3533         bp->pf->active_vfs = 0;
3534         for (i = 0; i < num_vfs; i++) {
3535                 add_random_mac_if_needed(bp, &req, i);
3536
3537                 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3538                 req.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags);
3539                 req.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
3540                 rc = bnxt_hwrm_send_message(bp,
3541                                             &req,
3542                                             sizeof(req),
3543                                             BNXT_USE_CHIMP_MB);
3544
3545                 /* Clear enable flag for next pass */
3546                 req.enables &= ~rte_cpu_to_le_32(
3547                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3548
3549                 if (rc || resp->error_code) {
3550                         PMD_DRV_LOG(ERR,
3551                                 "Failed to initizlie VF %d\n", i);
3552                         PMD_DRV_LOG(ERR,
3553                                 "Not all VFs available. (%d, %d)\n",
3554                                 rc, resp->error_code);
3555                         HWRM_UNLOCK();
3556                         break;
3557                 }
3558
3559                 HWRM_UNLOCK();
3560
3561                 reserve_resources_from_vf(bp, &req, i);
3562                 bp->pf->active_vfs++;
3563                 bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
3564         }
3565
3566         /*
3567          * Now configure the PF to use "the rest" of the resources.
3568          * We use STD_TX_RING_MODE here, which limits the TX rings but
3569          * allows QoS to function properly. Without this flag, PF rings
3570          * would break bandwidth settings.
3571          */
3572         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
3573         if (rc)
3574                 goto error_free;
3575
3576         rc = update_pf_resource_max(bp);
3577         if (rc)
3578                 goto error_free;
3579
3580         return rc;
3581
3582 error_free:
3583         bnxt_hwrm_func_buf_unrgtr(bp);
3584         return rc;
3585 }
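
/*
 * Summary (illustrative): the VF provisioning sequence above is
 *
 *	1. bnxt_hwrm_func_qcaps()      read PF resource limits
 *	2. bnxt_hwrm_pf_func_cfg(1)    shrink the PF to one TX ring
 *	3. bnxt_hwrm_func_buf_rgtr()   register the VF request-forward buffer
 *	4. HWRM_FUNC_CFG per VF        fund each VF, assign a MAC if needed
 *	5. bnxt_hwrm_pf_func_cfg(max)  return the leftover rings to the PF
 *	6. update_pf_resource_max()    re-read what firmware actually granted
 */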
3586
3587 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
3588 {
3589         struct hwrm_func_cfg_input req = {0};
3590         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3591         int rc;
3592
3593         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3594
3595         req.fid = rte_cpu_to_le_16(0xffff);
3596         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
3597         req.evb_mode = bp->pf->evb_mode;
3598
3599         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3600         HWRM_CHECK_RESULT();
3601         HWRM_UNLOCK();
3602
3603         return rc;
3604 }
3605
3606 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
3607                                 uint8_t tunnel_type)
3608 {
3609         struct hwrm_tunnel_dst_port_alloc_input req = {0};
3610         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3611         int rc = 0;
3612
3613         HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
3614         req.tunnel_type = tunnel_type;
3615         req.tunnel_dst_port_val = port;
3616         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3617         HWRM_CHECK_RESULT();
3618
3619         switch (tunnel_type) {
3620         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
3621                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
3622                 bp->vxlan_port = port;
3623                 break;
3624         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
3625                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
3626                 bp->geneve_port = port;
3627                 break;
3628         default:
3629                 break;
3630         }
3631
3632         HWRM_UNLOCK();
3633
3634         return rc;
3635 }
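
/*
 * Sketch (illustrative, hypothetical caller; the real udp_tunnel_port_add
 * callback lives in the PMD's ethdev code): mapping an rte_eth_udp_tunnel
 * request onto this HWRM call might look like
 *
 *	if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN)
 *		rc = bnxt_hwrm_tunnel_dst_port_alloc(bp,
 *			udp_tunnel->udp_port,
 *			HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN);
 */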
3636
3637 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
3638                                 uint8_t tunnel_type)
3639 {
3640         struct hwrm_tunnel_dst_port_free_input req = {0};
3641         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
3642         int rc = 0;
3643
3644         HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
3645
3646         req.tunnel_type = tunnel_type;
3647         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
3648         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3649
3650         HWRM_CHECK_RESULT();
3651         HWRM_UNLOCK();
3652
3653         return rc;
3654 }
3655
3656 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
3657                                         uint32_t flags)
3658 {
3659         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3660         struct hwrm_func_cfg_input req = {0};
3661         int rc;
3662
3663         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3664
3665         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3666         req.flags = rte_cpu_to_le_32(flags);
3667         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3668
3669         HWRM_CHECK_RESULT();
3670         HWRM_UNLOCK();
3671
3672         return rc;
3673 }
3674
3675 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
3676 {
3677         uint32_t *flag = flagp;
3678
3679         vnic->flags = *flag;
3680 }
3681
3682 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3683 {
3684         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
3685 }
3686
3687 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
3688 {
3689         int rc = 0;
3690         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
3691         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
3692
3693         HWRM_PREP(&req, HWRM_FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
3694
3695         req.req_buf_num_pages = rte_cpu_to_le_16(1);
3696         req.req_buf_page_size = rte_cpu_to_le_16(
3697                          page_getenum(bp->pf->active_vfs * HWRM_MAX_REQ_LEN));
3698         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
3699         req.req_buf_page_addr0 =
3700                 rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf));
3701         if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
3702                 PMD_DRV_LOG(ERR,
3703                         "unable to map buffer address to physical memory\n");
3704                 return -ENOMEM;
3705         }
3706
3707         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3708
3709         HWRM_CHECK_RESULT();
3710         HWRM_UNLOCK();
3711
3712         return rc;
3713 }
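
/*
 * Worked example (illustrative): req_buf_page_size carries the buffer
 * page size as a log2 exponent. With active_vfs = 8 the buffer spans
 * 8 * HWRM_MAX_REQ_LEN = 8 * 128 = 1024 bytes, page_getenum(1024) = 12,
 * i.e. one 4 KiB page, consistent with req_buf_num_pages = 1 above.
 */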
3714
3715 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
3716 {
3717         int rc = 0;
3718         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
3719         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
3720
3721         if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
3722                 return 0;
3723
3724         HWRM_PREP(&req, HWRM_FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
3725
3726         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3727
3728         HWRM_CHECK_RESULT();
3729         HWRM_UNLOCK();
3730
3731         return rc;
3732 }
3733
3734 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
3735 {
3736         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3737         struct hwrm_func_cfg_input req = {0};
3738         int rc;
3739
3740         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3741
3742         req.fid = rte_cpu_to_le_16(0xffff);
3743         req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
3744         req.enables = rte_cpu_to_le_32(
3745                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3746         req.async_event_cr = rte_cpu_to_le_16(
3747                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3748         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3749
3750         HWRM_CHECK_RESULT();
3751         HWRM_UNLOCK();
3752
3753         return rc;
3754 }
3755
3756 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
3757 {
3758         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3759         struct hwrm_func_vf_cfg_input req = {0};
3760         int rc;
3761
3762         HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
3763
3764         req.enables = rte_cpu_to_le_32(
3765                         HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3766         req.async_event_cr = rte_cpu_to_le_16(
3767                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3768         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3769
3770         HWRM_CHECK_RESULT();
3771         HWRM_UNLOCK();
3772
3773         return rc;
3774 }
3775
3776 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
3777 {
3778         struct hwrm_func_cfg_input req = {0};
3779         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3780         uint16_t dflt_vlan, fid;
3781         uint32_t func_cfg_flags;
3782         int rc = 0;
3783
3784         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3785
3786         if (is_vf) {
3787                 dflt_vlan = bp->pf->vf_info[vf].dflt_vlan;
3788                 fid = bp->pf->vf_info[vf].fid;
3789                 func_cfg_flags = bp->pf->vf_info[vf].func_cfg_flags;
3790         } else {
3791                 fid = rte_cpu_to_le_16(0xffff);
3792                 func_cfg_flags = bp->pf->func_cfg_flags;
3793                 dflt_vlan = bp->vlan;
3794         }
3795
3796         req.flags = rte_cpu_to_le_32(func_cfg_flags);
3797         req.fid = rte_cpu_to_le_16(fid);
3798         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3799         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
3800
3801         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3802
3803         HWRM_CHECK_RESULT();
3804         HWRM_UNLOCK();
3805
3806         return rc;
3807 }
3808
3809 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
3810                         uint16_t max_bw, uint16_t enables)
3811 {
3812         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3813         struct hwrm_func_cfg_input req = {0};
3814         int rc;
3815
3816         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3817
3818         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3819         req.enables |= rte_cpu_to_le_32(enables);
3820         req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
3821         req.max_bw = rte_cpu_to_le_32(max_bw);
3822         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3823
3824         HWRM_CHECK_RESULT();
3825         HWRM_UNLOCK();
3826
3827         return rc;
3828 }
3829
3830 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
3831 {
3832         struct hwrm_func_cfg_input req = {0};
3833         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3834         int rc = 0;
3835
3836         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3837
3838         req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
3839         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3840         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3841         req.dflt_vlan = rte_cpu_to_le_16(bp->pf->vf_info[vf].dflt_vlan);
3842
3843         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3844
3845         HWRM_CHECK_RESULT();
3846         HWRM_UNLOCK();
3847
3848         return rc;
3849 }
3850
3851 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3852 {
3853         int rc;
3854
3855         if (BNXT_PF(bp))
3856                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3857         else
3858                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3859
3860         return rc;
3861 }
3862
3863 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3864                               void *encaped, size_t ec_size)
3865 {
3866         int rc = 0;
3867         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3868         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3869
3870         if (ec_size > sizeof(req.encap_request))
3871                 return -EINVAL;
3872
3873         HWRM_PREP(&req, HWRM_REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
3874
3875         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3876         memcpy(req.encap_request, encaped, ec_size);
3877
3878         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3879
3880         HWRM_CHECK_RESULT();
3881         HWRM_UNLOCK();
3882
3883         return rc;
3884 }
3885
3886 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3887                                        struct rte_ether_addr *mac)
3888 {
3889         struct hwrm_func_qcfg_input req = {0};
3890         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3891         int rc;
3892
3893         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3894
3895         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3896         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3897
3898         HWRM_CHECK_RESULT();
3899
3900         memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);
3901
3902         HWRM_UNLOCK();
3903
3904         return rc;
3905 }
3906
3907 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3908                             void *encaped, size_t ec_size)
3909 {
3910         int rc = 0;
3911         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3912         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3913
3914         if (ec_size > sizeof(req.encap_request))
3915                 return -EINVAL;
3916
3917         HWRM_PREP(&req, HWRM_EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
3918
3919         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3920         memcpy(req.encap_request, encaped, ec_size);
3921
3922         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3923
3924         HWRM_CHECK_RESULT();
3925         HWRM_UNLOCK();
3926
3927         return rc;
3928 }
3929
3930 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3931                          struct rte_eth_stats *stats, uint8_t rx)
3932 {
3933         int rc = 0;
3934         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3935         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3936
3937         HWRM_PREP(&req, HWRM_STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
3938
3939         req.stat_ctx_id = rte_cpu_to_le_32(cid);
3940
3941         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3942
3943         HWRM_CHECK_RESULT();
3944
3945         if (rx) {
3946                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3947                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3948                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3949                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3950                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3951                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3952                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3953                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3954         } else {
3955                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3956                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3957                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3958                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3959                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3960                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3961         }
3962
3963         HWRM_UNLOCK();
3964
3965         return rc;
3966 }
3967
3968 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3969 {
3970         struct hwrm_port_qstats_input req = {0};
3971         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3972         struct bnxt_pf_info *pf = bp->pf;
3973         int rc;
3974
3975         HWRM_PREP(&req, HWRM_PORT_QSTATS, BNXT_USE_CHIMP_MB);
3976
3977         req.port_id = rte_cpu_to_le_16(pf->port_id);
3978         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3979         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3980         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3981
3982         HWRM_CHECK_RESULT();
3983         HWRM_UNLOCK();
3984
3985         return rc;
3986 }
3987
3988 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3989 {
3990         struct hwrm_port_clr_stats_input req = {0};
3991         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3992         struct bnxt_pf_info *pf = bp->pf;
3993         int rc;
3994
3995         /* Not allowed on NS2 device, NPAR, MultiHost, VF */
3996         if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
3997             BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
3998                 return 0;
3999
4000         HWRM_PREP(&req, HWRM_PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
4001
4002         req.port_id = rte_cpu_to_le_16(pf->port_id);
4003         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4004
4005         HWRM_CHECK_RESULT();
4006         HWRM_UNLOCK();
4007
4008         return rc;
4009 }
4010
4011 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
4012 {
4013         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4014         struct hwrm_port_led_qcaps_input req = {0};
4015         int rc;
4016
4017         if (BNXT_VF(bp))
4018                 return 0;
4019
4020         HWRM_PREP(&req, HWRM_PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
4021         req.port_id = bp->pf->port_id;
4022         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4023
4024         HWRM_CHECK_RESULT();
4025
4026         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
4027                 unsigned int i;
4028
4029                 bp->leds->num_leds = resp->num_leds;
4030                 memcpy(bp->leds, &resp->led0_id,
4031                         sizeof(bp->leds[0]) * bp->leds->num_leds);
4032                 for (i = 0; i < bp->leds->num_leds; i++) {
4033                         struct bnxt_led_info *led = &bp->leds[i];
4034
4035                         uint16_t caps = led->led_state_caps;
4036
4037                         if (!led->led_group_id ||
4038                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
4039                                 bp->leds->num_leds = 0;
4040                                 break;
4041                         }
4042                 }
4043         }
4044
4045         HWRM_UNLOCK();
4046
4047         return rc;
4048 }
4049
4050 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
4051 {
4052         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4053         struct hwrm_port_led_cfg_input req = {0};
4054         struct bnxt_led_cfg *led_cfg;
4055         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
4056         uint16_t duration = 0;
4057         int rc, i;
4058
4059         if (!bp->leds->num_leds || BNXT_VF(bp))
4060                 return -EOPNOTSUPP;
4061
4062         HWRM_PREP(&req, HWRM_PORT_LED_CFG, BNXT_USE_CHIMP_MB);
4063
4064         if (led_on) {
4065                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
4066                 duration = rte_cpu_to_le_16(500);
4067         }
4068         req.port_id = bp->pf->port_id;
4069         req.num_leds = bp->leds->num_leds;
4070         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
4071         for (i = 0; i < bp->leds->num_leds; i++, led_cfg++) {
4072                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
4073                 led_cfg->led_id = bp->leds[i].led_id;
4074                 led_cfg->led_state = led_state;
4075                 led_cfg->led_blink_on = duration;
4076                 led_cfg->led_blink_off = duration;
4077                 led_cfg->led_group_id = bp->leds[i].led_group_id;
4078         }
4079
4080         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4081
4082         HWRM_CHECK_RESULT();
4083         HWRM_UNLOCK();
4084
4085         return rc;
4086 }
4087
4088 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
4089                                uint32_t *length)
4090 {
4091         int rc;
4092         struct hwrm_nvm_get_dir_info_input req = {0};
4093         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
4094
4095         HWRM_PREP(&req, HWRM_NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
4096
4097         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4098
4099         HWRM_CHECK_RESULT();
4100
4101         *entries = rte_le_to_cpu_32(resp->entries);
4102         *length = rte_le_to_cpu_32(resp->entry_length);
4103
4104         HWRM_UNLOCK();
4105         return rc;
4106 }
4107
4108 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
4109 {
4110         int rc;
4111         uint32_t dir_entries;
4112         uint32_t entry_length;
4113         uint8_t *buf;
4114         size_t buflen;
4115         rte_iova_t dma_handle;
4116         struct hwrm_nvm_get_dir_entries_input req = {0};
4117         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
4118
4119         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
4120         if (rc != 0)
4121                 return rc;
4122
4123         *data++ = dir_entries;
4124         *data++ = entry_length;
4125         len -= 2;
4126         memset(data, 0xff, len);
4127
4128         buflen = dir_entries * entry_length;
4129         buf = rte_malloc("nvm_dir", buflen, 0);
4130         if (buf == NULL)
4131                 return -ENOMEM;
4132         dma_handle = rte_malloc_virt2iova(buf);
4133         if (dma_handle == RTE_BAD_IOVA) {
4134                 PMD_DRV_LOG(ERR,
4135                         "unable to map response address to physical memory\n");
                 rte_free(buf); /* don't leak the DMA buffer on map failure */
4136                 return -ENOMEM;
4137         }
4138         HWRM_PREP(&req, HWRM_NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
4139         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4140         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4141
4142         if (rc == 0)
4143                 memcpy(data, buf, len > buflen ? buflen : len);
4144
4145         rte_free(buf);
4146         HWRM_CHECK_RESULT();
4147         HWRM_UNLOCK();
4148
4149         return rc;
4150 }
4151
4152 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
4153                              uint32_t offset, uint32_t length,
4154                              uint8_t *data)
4155 {
4156         int rc;
4157         uint8_t *buf;
4158         rte_iova_t dma_handle;
4159         struct hwrm_nvm_read_input req = {0};
4160         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
4161
4162         buf = rte_malloc("nvm_item", length, 0);
4163         if (!buf)
4164                 return -ENOMEM;
4165
4166         dma_handle = rte_malloc_virt2iova(buf);
4167         if (dma_handle == RTE_BAD_IOVA) {
4168                 PMD_DRV_LOG(ERR,
4169                         "unable to map response address to physical memory\n");
                 rte_free(buf); /* don't leak the DMA buffer on map failure */
4170                 return -ENOMEM;
4171         }
4172         HWRM_PREP(&req, HWRM_NVM_READ, BNXT_USE_CHIMP_MB);
4173         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4174         req.dir_idx = rte_cpu_to_le_16(index);
4175         req.offset = rte_cpu_to_le_32(offset);
4176         req.len = rte_cpu_to_le_32(length);
4177         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4178         if (rc == 0)
4179                 memcpy(data, buf, length);
4180
4181         rte_free(buf);
4182         HWRM_CHECK_RESULT();
4183         HWRM_UNLOCK();
4184
4185         return rc;
4186 }
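
/*
 * Sketch only: reading the first 256 bytes of NVM directory entry 0 into a
 * stack buffer. The index/offset/length values are placeholders.
 *
 *	uint8_t item[256];
 *	rc = bnxt_hwrm_get_nvram_item(bp, 0, 0, sizeof(item), item);
 *	if (rc != 0)
 *		PMD_DRV_LOG(ERR, "NVM item read failed: %d\n", rc);
 */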
4187
4188 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
4189 {
4190         int rc;
4191         struct hwrm_nvm_erase_dir_entry_input req = {0};
4192         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
4193
4194         HWRM_PREP(&req, HWRM_NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
4195         req.dir_idx = rte_cpu_to_le_16(index);
4196         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4197         HWRM_CHECK_RESULT();
4198         HWRM_UNLOCK();
4199
4200         return rc;
4201 }
4202
4204 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
4205                           uint16_t dir_ordinal, uint16_t dir_ext,
4206                           uint16_t dir_attr, const uint8_t *data,
4207                           size_t data_len)
4208 {
4209         int rc;
4210         struct hwrm_nvm_write_input req = {0};
4211         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
4212         rte_iova_t dma_handle;
4213         uint8_t *buf;
4214
4215         buf = rte_malloc("nvm_write", data_len, 0);
4216         if (!buf)
4217                 return -ENOMEM;
4218
4219         dma_handle = rte_malloc_virt2iova(buf);
4220         if (dma_handle == RTE_BAD_IOVA) {
4221                 PMD_DRV_LOG(ERR,
4222                         "unable to map write buffer to physical memory\n");
                 rte_free(buf); /* don't leak the staging buffer on map failure */
4223                 return -ENOMEM;
4224         }
4225         memcpy(buf, data, data_len);
4226
4227         HWRM_PREP(&req, HWRM_NVM_WRITE, BNXT_USE_CHIMP_MB);
4228
4229         req.dir_type = rte_cpu_to_le_16(dir_type);
4230         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
4231         req.dir_ext = rte_cpu_to_le_16(dir_ext);
4232         req.dir_attr = rte_cpu_to_le_16(dir_attr);
4233         req.dir_data_length = rte_cpu_to_le_32(data_len);
4234         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
4235
4236         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4237
4238         rte_free(buf);
4239         HWRM_CHECK_RESULT();
4240         HWRM_UNLOCK();
4241
4242         return rc;
4243 }
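
/*
 * Hedged sketch of a write through the helper above. The directory type,
 * ordinal, extension and attribute values are placeholders, not values the
 * firmware is known to accept.
 *
 *	static const uint8_t blob[] = { 0x00, 0x01, 0x02, 0x03 };
 *	rc = bnxt_hwrm_flash_nvram(bp, 0, 0, 0, 0, blob, sizeof(blob));
 */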
4244
4245 static void
4246 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
4247 {
4248         uint32_t *count = cbdata;
4249
4250         *count = *count + 1;
4251 }
4252
4253 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
4254                                      struct bnxt_vnic_info *vnic __rte_unused)
4255 {
4256         return 0;
4257 }
4258
4259 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
4260 {
4261         uint32_t count = 0;
4262
4263         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
4264             &count, bnxt_vnic_count_hwrm_stub);
4265
4266         return count;
4267 }
4268
4269 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
4270                                         uint16_t *vnic_ids)
4271 {
4272         struct hwrm_func_vf_vnic_ids_query_input req = {0};
4273         struct hwrm_func_vf_vnic_ids_query_output *resp =
4274                                                 bp->hwrm_cmd_resp_addr;
4275         int rc;
4276
4277         /* First query all VNIC ids */
4278         HWRM_PREP(&req, HWRM_FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
4279
4280         req.vf_id = rte_cpu_to_le_16(bp->pf->first_vf_id + vf);
4281         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf->total_vnics);
4282         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));
4283
4284         if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
4285                 HWRM_UNLOCK();
4286                 PMD_DRV_LOG(ERR,
4287                 "unable to map VNIC ID table address to physical memory\n");
4288                 return -ENOMEM;
4289         }
4290         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4291         HWRM_CHECK_RESULT();
4292         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
4293
4294         HWRM_UNLOCK();
4295
4296         return rc;
4297 }
4298
4299 /*
4300  * This function queries the VNIC IDs for a specified VF. It then calls
4301  * the vnic_cb to update the necessary field in vnic_info with cbdata.
4302  * Then it calls the hwrm_cb function to program this new vnic configuration.
4303  */
4304 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
4305         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
4306         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
4307 {
4308         struct bnxt_vnic_info vnic;
4309         int rc = 0;
4310         int i, num_vnic_ids;
4311         uint16_t *vnic_ids;
4312         size_t vnic_id_sz;
4313         size_t sz;
4314
4315         /* First query all VNIC ids */
4316         vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
4317         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4318                         RTE_CACHE_LINE_SIZE);
4319         if (vnic_ids == NULL)
4320                 return -ENOMEM;
4321
4322         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4323                 rte_mem_lock_page(((char *)vnic_ids) + sz);
4324
4325         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4326
4327         if (num_vnic_ids < 0) {
                 rte_free(vnic_ids); /* don't leak the ID table on query failure */
4328                 return num_vnic_ids;
         }
4329
4330         /* Retrieve each VNIC, let the callback update it, then reprogram it */
4331
4332         for (i = 0; i < num_vnic_ids; i++) {
4333                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4334                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4335                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf->first_vf_id + vf);
4336                 if (rc)
4337                         break;
4338                 if (vnic.mru <= 4)      /* Indicates unallocated */
4339                         continue;
4340
4341                 vnic_cb(&vnic, cbdata);
4342
4343                 rc = hwrm_cb(bp, &vnic);
4344                 if (rc)
4345                         break;
4346         }
4347
4348         rte_free(vnic_ids);
4349
4350         return rc;
4351 }
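
/*
 * A hedged sketch of the callback pattern, modeled on bnxt_vnic_count()
 * above: the hypothetical bnxt_vnic_set_bd_stall() updates the temporary
 * vnic_info, and bnxt_hwrm_vnic_cfg() (a real helper elsewhere in this
 * file) programs it back to the firmware.
 *
 *	static void bnxt_vnic_set_bd_stall(struct bnxt_vnic_info *vnic,
 *					   void *cbdata)
 *	{
 *		vnic->bd_stall = *(bool *)cbdata;
 *	}
 *
 *	bool stall = true;
 *	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
 *		bnxt_vnic_set_bd_stall, &stall, bnxt_hwrm_vnic_cfg);
 */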
4352
4353 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
4354                                               bool on)
4355 {
4356         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4357         struct hwrm_func_cfg_input req = {0};
4358         int rc;
4359
4360         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4361
4362         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4363         req.enables |= rte_cpu_to_le_32(
4364                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
4365         req.vlan_antispoof_mode = on ?
4366                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
4367                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
4368         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4369
4370         HWRM_CHECK_RESULT();
4371         HWRM_UNLOCK();
4372
4373         return rc;
4374 }
4375
4376 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
4377 {
4378         struct bnxt_vnic_info vnic;
4379         uint16_t *vnic_ids;
4380         size_t vnic_id_sz;
4381         int num_vnic_ids, i;
4382         size_t sz;
4383         int rc;
4384
4385         vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
4386         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4387                         RTE_CACHE_LINE_SIZE);
4388         if (vnic_ids == NULL)
4389                 return -ENOMEM;
4390
4391         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4392                 rte_mem_lock_page(((char *)vnic_ids) + sz);
4393
4394         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4395         if (rc <= 0)
4396                 goto exit;
4397         num_vnic_ids = rc;
4398
4399         /*
4400          * Loop through to find the default VNIC ID.
4401          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
4402          * by sending the hwrm_func_qcfg command to the firmware.
4403          */
4404         for (i = 0; i < num_vnic_ids; i++) {
4405                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4406                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4407                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
4408                                         bp->pf->first_vf_id + vf);
4409                 if (rc)
4410                         goto exit;
4411                 if (vnic.func_default) {
4412                         rte_free(vnic_ids);
4413                         return vnic.fw_vnic_id;
4414                 }
4415         }
4416         /* Could not find a default VNIC. */
4417         PMD_DRV_LOG(ERR, "No default VNIC\n");
4418 exit:
4419         rte_free(vnic_ids);
4420         return rc;
4421 }
4422
4423 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
4424                          uint16_t dst_id,
4425                          struct bnxt_filter_info *filter)
4426 {
4427         int rc = 0;
4428         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
4429         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4430         uint32_t enables = 0;
4431
4432         if (filter->fw_em_filter_id != UINT64_MAX)
4433                 bnxt_hwrm_clear_em_filter(bp, filter);
4434
4435         HWRM_PREP(&req, HWRM_CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
4436
4437         req.flags = rte_cpu_to_le_32(filter->flags);
4438
4439         enables = filter->enables |
4440               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
4441         req.dst_id = rte_cpu_to_le_16(dst_id);
4442
4443         if (filter->ip_addr_type) {
4444                 req.ip_addr_type = filter->ip_addr_type;
4445                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4446         }
4447         if (enables &
4448             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4449                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4450         if (enables &
4451             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4452                 memcpy(req.src_macaddr, filter->src_macaddr,
4453                        RTE_ETHER_ADDR_LEN);
4454         if (enables &
4455             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
4456                 memcpy(req.dst_macaddr, filter->dst_macaddr,
4457                        RTE_ETHER_ADDR_LEN);
4458         if (enables &
4459             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
4460                 req.ovlan_vid = filter->l2_ovlan;
4461         if (enables &
4462             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
4463                 req.ivlan_vid = filter->l2_ivlan;
4464         if (enables &
4465             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
4466                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4467         if (enables &
4468             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4469                 req.ip_protocol = filter->ip_protocol;
4470         if (enables &
4471             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4472                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
4473         if (enables &
4474             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
4475                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
4476         if (enables &
4477             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
4478                 req.src_port = rte_cpu_to_be_16(filter->src_port);
4479         if (enables &
4480             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
4481                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
4482         if (enables &
4483             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4484                 req.mirror_vnic_id = filter->mirror_vnic_id;
4485
4486         req.enables = rte_cpu_to_le_32(enables);
4487
4488         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4489
4490         HWRM_CHECK_RESULT();
4491
4492         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
4493         HWRM_UNLOCK();
4494
4495         return rc;
4496 }
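
/*
 * Illustrative sketch only: an exact-match filter steering one TCP flow to
 * a destination VNIC. Field names come from struct bnxt_filter_info; the
 * dst_id would normally be a VNIC fw id owned by the caller, and the
 * addresses and ports are placeholders.
 *
 *	struct bnxt_filter_info f = { 0 };
 *	f.fw_em_filter_id = UINT64_MAX;
 *	f.enables = HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR |
 *		    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR |
 *		    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT |
 *		    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT |
 *		    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL;
 *	f.ip_protocol = 6;	(TCP)
 *	f.src_ipaddr[0] = RTE_IPV4(10, 0, 0, 1);
 *	f.dst_ipaddr[0] = RTE_IPV4(10, 0, 0, 2);
 *	f.src_port = 1024;
 *	f.dst_port = 80;
 *	rc = bnxt_hwrm_set_em_filter(bp, vnic->fw_vnic_id, &f);
 */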
4497
4498 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
4499 {
4500         int rc = 0;
4501         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
4502         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
4503
4504         if (filter->fw_em_filter_id == UINT64_MAX)
4505                 return 0;
4506
4507         HWRM_PREP(&req, HWRM_CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
4508
4509         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
4510
4511         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4512
4513         HWRM_CHECK_RESULT();
4514         HWRM_UNLOCK();
4515
4516         filter->fw_em_filter_id = UINT64_MAX;
4517         filter->fw_l2_filter_id = UINT64_MAX;
4518
4519         return 0;
4520 }
4521
4522 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
4523                          uint16_t dst_id,
4524                          struct bnxt_filter_info *filter)
4525 {
4526         int rc = 0;
4527         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
4528         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
4529                                                 bp->hwrm_cmd_resp_addr;
4530         uint32_t enables = 0;
4531
4532         if (filter->fw_ntuple_filter_id != UINT64_MAX)
4533                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
4534
4535         HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
4536
4537         req.flags = rte_cpu_to_le_32(filter->flags);
4538
4539         enables = filter->enables |
4540               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
4541         req.dst_id = rte_cpu_to_le_16(dst_id);
4542
4543         if (filter->ip_addr_type) {
4544                 req.ip_addr_type = filter->ip_addr_type;
4545                 enables |=
4546                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4547         }
4548         if (enables &
4549             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4550                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4551         if (enables &
4552             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4553                 memcpy(req.src_macaddr, filter->src_macaddr,
4554                        RTE_ETHER_ADDR_LEN);
4555         if (enables &
4556             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
4557                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4558         if (enables &
4559             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4560                 req.ip_protocol = filter->ip_protocol;
4561         if (enables &
4562             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4563                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
4564         if (enables &
4565             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
4566                 req.src_ipaddr_mask[0] =
4567                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
4568         if (enables &
4569             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
4570                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
4571         if (enables &
4572             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
4573                 req.dst_ipaddr_mask[0] =
4574                         rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
4575         if (enables &
4576             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
4577                 req.src_port = rte_cpu_to_le_16(filter->src_port);
4578         if (enables &
4579             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
4580                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
4581         if (enables &
4582             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
4583                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
4584         if (enables &
4585             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
4586                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
4587         if (enables &
4588             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4589                 req.mirror_vnic_id = filter->mirror_vnic_id;
4590
4591         req.enables = rte_cpu_to_le_32(enables);
4592
4593         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4594
4595         HWRM_CHECK_RESULT();
4596
4597         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
4598         filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
4599         HWRM_UNLOCK();
4600
4601         return rc;
4602 }
4603
4604 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
4605                                 struct bnxt_filter_info *filter)
4606 {
4607         int rc = 0;
4608         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
4609         struct hwrm_cfa_ntuple_filter_free_output *resp =
4610                                                 bp->hwrm_cmd_resp_addr;
4611
4612         if (filter->fw_ntuple_filter_id == UINT64_MAX)
4613                 return 0;
4614
4615         HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
4616
4617         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
4618
4619         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4620
4621         HWRM_CHECK_RESULT();
4622         HWRM_UNLOCK();
4623
4624         filter->fw_ntuple_filter_id = UINT64_MAX;
4625
4626         return 0;
4627 }
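
/*
 * The ntuple variant mirrors the EM sketch above but adds per-field masks,
 * e.g. matching a /24 source prefix (illustrative values only, reusing the
 * struct bnxt_filter_info f from that sketch):
 *
 *	f.src_ipaddr[0] = RTE_IPV4(10, 0, 0, 0);
 *	f.src_ipaddr_mask[0] = RTE_IPV4(255, 255, 255, 0);
 *	f.enables |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR |
 *		     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK;
 *	rc = bnxt_hwrm_set_ntuple_filter(bp, vnic->fw_vnic_id, &f);
 */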
4628
4629 static int
4630 bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4631 {
4632         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4633         uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
4634         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
4635         struct bnxt_rx_queue **rxqs = bp->rx_queues;
4636         uint16_t *ring_tbl = vnic->rss_table;
4637         int nr_ctxs = vnic->num_lb_ctxts;
4638         int max_rings = bp->rx_nr_rings;
4639         int i, j, k, cnt;
4640         int rc = 0;
4641
4642         for (i = 0, k = 0; i < nr_ctxs; i++) {
4643                 struct bnxt_rx_ring_info *rxr;
4644                 struct bnxt_cp_ring_info *cpr;
4645
4646                 HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
4647
4648                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
4649                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
4650                 req.hash_mode_flags = vnic->hash_mode;
4651
4652                 req.ring_grp_tbl_addr =
4653                     rte_cpu_to_le_64(vnic->rss_table_dma_addr +
4654                                      i * BNXT_RSS_ENTRIES_PER_CTX_THOR *
4655                                      2 * sizeof(*ring_tbl));
4656                 req.hash_key_tbl_addr =
4657                     rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
4658
4659                 req.ring_table_pair_index = i;
4660                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
4661
4662                 for (j = 0; j < 64; j++) {
4663                         uint16_t ring_id;
4664
4665                         /* Find next active ring. */
4666                         for (cnt = 0; cnt < max_rings; cnt++) {
4667                                 if (rx_queue_state[k] !=
4668                                                 RTE_ETH_QUEUE_STATE_STOPPED)
4669                                         break;
4670                                 if (++k == max_rings)
4671                                         k = 0;
4672                         }
4673
4674                         /* Return if no rings are active. */
4675                         if (cnt == max_rings) {
4676                                 HWRM_UNLOCK();
4677                                 return 0;
4678                         }
4679
4680                         /* Add rx/cp ring pair to RSS table. */
4681                         rxr = rxqs[k]->rx_ring;
4682                         cpr = rxqs[k]->cp_ring;
4683
4684                         ring_id = rxr->rx_ring_struct->fw_ring_id;
4685                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4686                         ring_id = cpr->cp_ring_struct->fw_ring_id;
4687                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4688
4689                         if (++k == max_rings)
4690                                 k = 0;
4691                 }
4692                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
4693                                             BNXT_USE_CHIMP_MB);
4694
4695                 HWRM_CHECK_RESULT();
4696                 HWRM_UNLOCK();
4697         }
4698
4699         return rc;
4700 }
4701
4702 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4703 {
4704         unsigned int rss_idx, fw_idx, i;
4705
4706         if (!(vnic->rss_table && vnic->hash_type))
4707                 return 0;
4708
4709         if (BNXT_CHIP_THOR(bp))
4710                 return bnxt_vnic_rss_configure_thor(bp, vnic);
4711
4712         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4713                 return 0;
4714
4715         if (vnic->rss_table && vnic->hash_type) {
4716                 /*
4717                  * Fill the RSS hash & redirection table with
4718                  * ring group ids for all VNICs
4719                  */
4720                 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
4721                         rss_idx++, fw_idx++) {
4722                         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
4723                                 fw_idx %= bp->rx_cp_nr_rings;
4724                                 if (vnic->fw_grp_ids[fw_idx] !=
4725                                     INVALID_HW_RING_ID)
4726                                         break;
4727                                 fw_idx++;
4728                         }
4729                         if (i == bp->rx_cp_nr_rings)
4730                                 return 0;
4731                         vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
4732                 }
4733                 return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
4734         }
4735
4736         return 0;
4737 }
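
/*
 * Worked example of the non-Thor redirection fill above: with ring groups
 * {5, 9, INVALID, 12}, the HW_HASH_INDEX_SIZE slots are populated
 * round-robin from the valid ids only, i.e. 5, 9, 12, 5, 9, 12, ...; the
 * inner loop exists solely to skip INVALID_HW_RING_ID holes.
 */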
4738
4739 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
4740         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
4741 {
4742         uint16_t flags;
4743
4744         req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
4745
4746         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
4747         req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
4748
4749         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
4750         req->num_cmpl_dma_aggr_during_int =
4751                 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
4752
4753         req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
4754
4755         /* min timer set to 1/2 of interrupt timer */
4756         req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
4757
4758         /* buf timer set to 1/4 of interrupt timer */
4759         req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
4760
4761         req->cmpl_aggr_dma_tmr_during_int =
4762                 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
4763
4764         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
4765                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
4766         req->flags = rte_cpu_to_le_16(flags);
4767 }
4768
4769 static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp,
4770                 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
4771 {
4772         struct hwrm_ring_aggint_qcaps_input req = {0};
4773         struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4774         uint32_t enables;
4775         uint16_t flags;
4776         int rc;
4777
4778         HWRM_PREP(&req, HWRM_RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
4779         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4780         HWRM_CHECK_RESULT();
4781
4782         agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max;
4783         agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min;
4784
4785         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
4786                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
4787         agg_req->flags = rte_cpu_to_le_16(flags);
4788         enables =
4789          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR |
4790          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR;
4791         agg_req->enables = rte_cpu_to_le_32(enables);
4792
4793         HWRM_UNLOCK();
4794         return rc;
4795 }
4796
4797 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
4798                         struct bnxt_coal *coal, uint16_t ring_id)
4799 {
4800         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
4801         struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
4802                                                 bp->hwrm_cmd_resp_addr;
4803         int rc;
4804
4805         /* Set ring coalesce parameters only for Thor chips and Stratus 100G NICs */
4806         if (BNXT_CHIP_THOR(bp)) {
4807                 if (bnxt_hwrm_set_coal_params_thor(bp, &req))
4808                         return -1;
4809         } else if (bnxt_stratus_device(bp)) {
4810                 bnxt_hwrm_set_coal_params(coal, &req);
4811         } else {
4812                 return 0;
4813         }
4814
4815         HWRM_PREP(&req,
4816                   HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
4817                   BNXT_USE_CHIMP_MB);
4818         req.ring_id = rte_cpu_to_le_16(ring_id);
4819         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4820         HWRM_CHECK_RESULT();
4821         HWRM_UNLOCK();
4822         return 0;
4823 }
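
/*
 * Hedged usage sketch (placeholder values, not validated tuning). The
 * ratios follow the comments in bnxt_hwrm_set_coal_params() above: min
 * timer at half the interrupt timer, DMA timer at a quarter.
 *
 *	struct bnxt_coal coal = {
 *		.num_cmpl_aggr_int = 4,
 *		.num_cmpl_dma_aggr = 4,
 *		.num_cmpl_dma_aggr_during_int = 4,
 *		.int_lat_tmr_max = 64,
 *		.int_lat_tmr_min = 32,
 *		.cmpl_aggr_dma_tmr = 16,
 *		.cmpl_aggr_dma_tmr_during_int = 16,
 *	};
 *	rc = bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring_id);
 */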
4824
4825 #define BNXT_RTE_MEMZONE_FLAG  (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
4826 int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
4827 {
4828         struct hwrm_func_backing_store_qcaps_input req = {0};
4829         struct hwrm_func_backing_store_qcaps_output *resp =
4830                 bp->hwrm_cmd_resp_addr;
4831         struct bnxt_ctx_pg_info *ctx_pg;
4832         struct bnxt_ctx_mem_info *ctx;
4833         int total_alloc_len;
4834         int rc, i, tqm_rings;
4835
4836         if (!BNXT_CHIP_THOR(bp) ||
4837             bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
4838             BNXT_VF(bp) ||
4839             bp->ctx)
4840                 return 0;
4841
4842         HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
4843         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4844         HWRM_CHECK_RESULT_SILENT();
4845
4846         total_alloc_len = sizeof(*ctx);
4847         ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len,
4848                           RTE_CACHE_LINE_SIZE);
4849         if (!ctx) {
4850                 rc = -ENOMEM;
4851                 goto ctx_err;
4852         }
4853
4854         ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
4855         ctx->qp_min_qp1_entries =
4856                 rte_le_to_cpu_16(resp->qp_min_qp1_entries);
4857         ctx->qp_max_l2_entries =
4858                 rte_le_to_cpu_16(resp->qp_max_l2_entries);
4859         ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
4860         ctx->srq_max_l2_entries =
4861                 rte_le_to_cpu_16(resp->srq_max_l2_entries);
4862         ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
4863         ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
4864         ctx->cq_max_l2_entries =
4865                 rte_le_to_cpu_16(resp->cq_max_l2_entries);
4866         ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
4867         ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
4868         ctx->vnic_max_vnic_entries =
4869                 rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
4870         ctx->vnic_max_ring_table_entries =
4871                 rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
4872         ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
4873         ctx->stat_max_entries =
4874                 rte_le_to_cpu_32(resp->stat_max_entries);
4875         ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
4876         ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
4877         ctx->tqm_min_entries_per_ring =
4878                 rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
4879         ctx->tqm_max_entries_per_ring =
4880                 rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
4881         ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
4882         if (!ctx->tqm_entries_multiple)
4883                 ctx->tqm_entries_multiple = 1;
4884         ctx->mrav_max_entries =
4885                 rte_le_to_cpu_32(resp->mrav_max_entries);
4886         ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
4887         ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
4888         ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
4889         ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
4890
4891         if (!ctx->tqm_fp_rings_count)
4892                 ctx->tqm_fp_rings_count = bp->max_q;
4893
4894         tqm_rings = ctx->tqm_fp_rings_count + 1;
4895
4896         ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
4897                             sizeof(*ctx_pg) * tqm_rings,
4898                             RTE_CACHE_LINE_SIZE);
4899         if (!ctx_pg) {
4900                 rc = -ENOMEM;
4901                 goto ctx_err;
4902         }
4903         for (i = 0; i < tqm_rings; i++, ctx_pg++)
4904                 ctx->tqm_mem[i] = ctx_pg;
4905
4906         bp->ctx = ctx;
4907 ctx_err:
4908         HWRM_UNLOCK();
4909         return rc;
4910 }
4911
4912 int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
4913 {
4914         struct hwrm_func_backing_store_cfg_input req = {0};
4915         struct hwrm_func_backing_store_cfg_output *resp =
4916                 bp->hwrm_cmd_resp_addr;
4917         struct bnxt_ctx_mem_info *ctx = bp->ctx;
4918         struct bnxt_ctx_pg_info *ctx_pg;
4919         uint32_t *num_entries;
4920         uint64_t *pg_dir;
4921         uint8_t *pg_attr;
4922         uint32_t ena;
4923         int i, rc;
4924
4925         if (!ctx)
4926                 return 0;
4927
4928         HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
4929         req.enables = rte_cpu_to_le_32(enables);
4930
4931         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
4932                 ctx_pg = &ctx->qp_mem;
4933                 req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4934                 req.qp_num_qp1_entries =
4935                         rte_cpu_to_le_16(ctx->qp_min_qp1_entries);
4936                 req.qp_num_l2_entries =
4937                         rte_cpu_to_le_16(ctx->qp_max_l2_entries);
4938                 req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);
4939                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4940                                       &req.qpc_pg_size_qpc_lvl,
4941                                       &req.qpc_page_dir);
4942         }
4943
4944         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
4945                 ctx_pg = &ctx->srq_mem;
4946                 req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4947                 req.srq_num_l2_entries =
4948                                  rte_cpu_to_le_16(ctx->srq_max_l2_entries);
4949                 req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);
4950                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4951                                       &req.srq_pg_size_srq_lvl,
4952                                       &req.srq_page_dir);
4953         }
4954
4955         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
4956                 ctx_pg = &ctx->cq_mem;
4957                 req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4958                 req.cq_num_l2_entries =
4959                                 rte_cpu_to_le_16(ctx->cq_max_l2_entries);
4960                 req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);
4961                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4962                                       &req.cq_pg_size_cq_lvl,
4963                                       &req.cq_page_dir);
4964         }
4965
4966         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
4967                 ctx_pg = &ctx->vnic_mem;
4968                 req.vnic_num_vnic_entries =
4969                         rte_cpu_to_le_16(ctx->vnic_max_vnic_entries);
4970                 req.vnic_num_ring_table_entries =
4971                         rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);
4972                 req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);
4973                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4974                                       &req.vnic_pg_size_vnic_lvl,
4975                                       &req.vnic_page_dir);
4976         }
4977
4978         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
4979                 ctx_pg = &ctx->stat_mem;
4980                 req.stat_num_entries = rte_cpu_to_le_32(ctx->stat_max_entries);
4981                 req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);
4982                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4983                                       &req.stat_pg_size_stat_lvl,
4984                                       &req.stat_page_dir);
4985         }
4986
4987         req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
4988         num_entries = &req.tqm_sp_num_entries;
4989         pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;
4990         pg_dir = &req.tqm_sp_page_dir;
4991         ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
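         /* One TQM_SP entry plus eight per-ring TQM entries, gated by the enables bits */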
4992         for (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
4993                 if (!(enables & ena))
4994                         continue;
4995
4998                 ctx_pg = ctx->tqm_mem[i];
4999                 *num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5000                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
5001         }
5002
5003         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5004         HWRM_CHECK_RESULT();
5005         HWRM_UNLOCK();
5006
5007         return rc;
5008 }
5009
5010 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
5011 {
5012         struct hwrm_port_qstats_ext_input req = {0};
5013         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
5014         struct bnxt_pf_info *pf = bp->pf;
5015         int rc;
5016
5017         if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
5018               bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
5019                 return 0;
5020
5021         HWRM_PREP(&req, HWRM_PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
5022
5023         req.port_id = rte_cpu_to_le_16(pf->port_id);
5024         if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
5025                 req.tx_stat_host_addr =
5026                         rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map);
5027                 req.tx_stat_size =
5028                         rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
5029         }
5030         if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
5031                 req.rx_stat_host_addr =
5032                         rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map);
5033                 req.rx_stat_size =
5034                         rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
5035         }
5036         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5037
5038         if (rc) {
5039                 bp->fw_rx_port_stats_ext_size = 0;
5040                 bp->fw_tx_port_stats_ext_size = 0;
5041         } else {
5042                 bp->fw_rx_port_stats_ext_size =
5043                         rte_le_to_cpu_16(resp->rx_stat_size);
5044                 bp->fw_tx_port_stats_ext_size =
5045                         rte_le_to_cpu_16(resp->tx_stat_size);
5046         }
5047
5048         HWRM_CHECK_RESULT();
5049         HWRM_UNLOCK();
5050
5051         return rc;
5052 }
5053
5054 int
5055 bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
5056 {
5057         struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0};
5058         struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp =
5059                 bp->hwrm_cmd_resp_addr;
5060         int rc = 0;
5061
5062         HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB);
5063         req.tunnel_type = type;
5064         req.dest_fid = bp->fw_fid;
5065         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5066         HWRM_CHECK_RESULT();
5067
5068         HWRM_UNLOCK();
5069
5070         return rc;
5071 }
5072
5073 int
5074 bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
5075 {
5076         struct hwrm_cfa_redirect_tunnel_type_free_input req = {0};
5077         struct hwrm_cfa_redirect_tunnel_type_free_output *resp =
5078                 bp->hwrm_cmd_resp_addr;
5079         int rc = 0;
5080
5081         HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB);
5082         req.tunnel_type = type;
5083         req.dest_fid = bp->fw_fid;
5084         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5085         HWRM_CHECK_RESULT();
5086
5087         HWRM_UNLOCK();
5088
5089         return rc;
5090 }
5091
5092 int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
5093 {
5094         struct hwrm_cfa_redirect_query_tunnel_type_input req = {0};
5095         struct hwrm_cfa_redirect_query_tunnel_type_output *resp =
5096                 bp->hwrm_cmd_resp_addr;
5097         int rc = 0;
5098
5099         HWRM_PREP(&req, HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB);
5100         req.src_fid = bp->fw_fid;
5101         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5102         HWRM_CHECK_RESULT();
5103
5104         if (type)
5105                 *type = rte_le_to_cpu_32(resp->tunnel_mask);
5106
5107         HWRM_UNLOCK();
5108
5109         return rc;
5110 }
5111
5112 int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
5113                                    uint16_t *dst_fid)
5114 {
5115         struct hwrm_cfa_redirect_tunnel_type_info_input req = {0};
5116         struct hwrm_cfa_redirect_tunnel_type_info_output *resp =
5117                 bp->hwrm_cmd_resp_addr;
5118         int rc = 0;
5119
5120         HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
5121         req.src_fid = bp->fw_fid;
5122         req.tunnel_type = tun_type;
5123         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5124         HWRM_CHECK_RESULT();
5125
5126         if (dst_fid)
5127                 *dst_fid = rte_le_to_cpu_16(resp->dest_fid);
5128
5129         PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", resp->dest_fid);
5130
5131         HWRM_UNLOCK();
5132
5133         return rc;
5134 }
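
/*
 * Sketch: discovering which function owns VXLAN tunnel redirection. The
 * tunnel-type constant is assumed from the HWRM spec.
 *
 *	uint16_t dst_fid = 0;
 *	rc = bnxt_hwrm_tunnel_redirect_info(bp,
 *		HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_VXLAN,
 *		&dst_fid);
 */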
5135
5136 int bnxt_hwrm_set_mac(struct bnxt *bp)
5137 {
5138         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5139         struct hwrm_func_vf_cfg_input req = {0};
5140         int rc = 0;
5141
5142         if (!BNXT_VF(bp))
5143                 return 0;
5144
5145         HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
5146
5147         req.enables =
5148                 rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
5149         memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
5150
5151         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5152
5153         HWRM_CHECK_RESULT();
5154
5155         HWRM_UNLOCK();
5156
5157         return rc;
5158 }
5159
5160 int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
5161 {
5162         struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
5163         struct hwrm_func_drv_if_change_input req = {0};
5164         uint32_t flags;
5165         int rc;
5166
5167         if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
5168                 return 0;
5169
5170         /* Do not issue FUNC_DRV_IF_CHANGE during reset recovery.
5171          * If we issue FUNC_DRV_IF_CHANGE with flags down before
5172          * FUNC_DRV_UNRGTR, the FW resets before FUNC_DRV_UNRGTR completes.
5173          */
5174         if (!up && (bp->flags & BNXT_FLAG_FW_RESET))
5175                 return 0;
5176
5177         HWRM_PREP(&req, HWRM_FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);
5178
5179         if (up)
5180                 req.flags =
5181                 rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP);
5182
5183         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5184
5185         HWRM_CHECK_RESULT();
5186         flags = rte_le_to_cpu_32(resp->flags);
5187         HWRM_UNLOCK();
5188
5189         if (!up)
5190                 return 0;
5191
5192         if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
5193                 PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
5194                 bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
5195         }
5196
5197         return 0;
5198 }
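
/*
 * Sketch of the intended call order around port start/stop (the ethdev
 * code performs the equivalent): report "up" before reconfiguring rings,
 * and "down" once the port has been stopped.
 *
 *	bnxt_hwrm_if_change(bp, true);
 *	... bring up rings, VNICs, filters ...
 *	bnxt_hwrm_if_change(bp, false);
 */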
5199
5200 int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
5201 {
5202         struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5203         struct bnxt_error_recovery_info *info = bp->recovery_info;
5204         struct hwrm_error_recovery_qcfg_input req = {0};
5205         uint32_t flags = 0;
5206         unsigned int i;
5207         int rc;
5208
5209         /* Older FW does not have error recovery support */
5210         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5211                 return 0;
5212
5213         HWRM_PREP(&req, HWRM_ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
5214
5215         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5216
5217         HWRM_CHECK_RESULT();
5218
5219         flags = rte_le_to_cpu_32(resp->flags);
5220         if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST)
5221                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST;
5222         else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
5223                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU;
5224
5225         if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) &&
5226             !(bp->flags & BNXT_FLAG_KONG_MB_EN)) {
5227                 rc = -EINVAL;
5228                 goto err;
5229         }
5230
5231         /* FW returned values are in units of 100msec */
5232         info->driver_polling_freq =
5233                 rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
5234         info->master_func_wait_period =
5235                 rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
5236         info->normal_func_wait_period =
5237                 rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
5238         info->master_func_wait_period_after_reset =
5239                 rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100;
5240         info->max_bailout_time_after_reset =
5241                 rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
5242         info->status_regs[BNXT_FW_STATUS_REG] =
5243                 rte_le_to_cpu_32(resp->fw_health_status_reg);
5244         info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] =
5245                 rte_le_to_cpu_32(resp->fw_heartbeat_reg);
5246         info->status_regs[BNXT_FW_RECOVERY_CNT_REG] =
5247                 rte_le_to_cpu_32(resp->fw_reset_cnt_reg);
5248         info->status_regs[BNXT_FW_RESET_INPROG_REG] =
5249                 rte_le_to_cpu_32(resp->reset_inprogress_reg);
5250         info->reg_array_cnt =
5251                 rte_le_to_cpu_32(resp->reg_array_cnt);
5252
5253         if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) {
5254                 rc = -EINVAL;
5255                 goto err;
5256         }
5257
5258         for (i = 0; i < info->reg_array_cnt; i++) {
5259                 info->reset_reg[i] =
5260                         rte_le_to_cpu_32(resp->reset_reg[i]);
5261                 info->reset_reg_val[i] =
5262                         rte_le_to_cpu_32(resp->reset_reg_val[i]);
5263                 info->delay_after_reset[i] =
5264                         resp->delay_after_reset[i];
5265         }
5266 err:
5267         HWRM_UNLOCK();
5268
5269         /* Map the FW status registers */
5270         if (!rc)
5271                 rc = bnxt_map_fw_health_status_regs(bp);
5272
5273         if (rc) {
5274                 rte_free(bp->recovery_info);
5275                 bp->recovery_info = NULL;
5276         }
5277         return rc;
5278 }
5279
5280 int bnxt_hwrm_fw_reset(struct bnxt *bp)
5281 {
5282         struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr;
5283         struct hwrm_fw_reset_input req = {0};
5284         int rc;
5285
5286         if (!BNXT_PF(bp))
5287                 return -EOPNOTSUPP;
5288
5289         HWRM_PREP(&req, HWRM_FW_RESET, BNXT_USE_KONG(bp));
5290
5291         req.embedded_proc_type =
5292                 HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
5293         req.selfrst_status =
5294                 HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
5295         req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
5296
5297         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
5298                                     BNXT_USE_KONG(bp));
5299
5300         HWRM_CHECK_RESULT();
5301         HWRM_UNLOCK();
5302
5303         return rc;
5304 }
5305
5306 int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
5307 {
5308         struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
5309         struct hwrm_port_ts_query_input req = {0};
5310         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
5311         uint32_t flags = 0;
5312         int rc;
5313
5314         if (!ptp)
5315                 return 0;
5316
5317         HWRM_PREP(&req, HWRM_PORT_TS_QUERY, BNXT_USE_CHIMP_MB);
5318
5319         switch (path) {
5320         case BNXT_PTP_FLAGS_PATH_TX:
5321                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX;
5322                 break;
5323         case BNXT_PTP_FLAGS_PATH_RX:
5324                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX;
5325                 break;
5326         case BNXT_PTP_FLAGS_CURRENT_TIME:
5327                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME;
5328                 break;
5329         }
5330
5331         req.flags = rte_cpu_to_le_32(flags);
5332         req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
5333
5334         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5335
5336         HWRM_CHECK_RESULT();
5337
5338         if (timestamp) {
5339                 *timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]);
5340                 *timestamp |=
5341                         (uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32;
5342         }
5343         HWRM_UNLOCK();
5344
5345         return rc;
5346 }
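
/*
 * Sketch: fetching the current PHC time as a 64-bit nanosecond value
 * (path constants as in the switch above).
 *
 *	uint64_t ns = 0;
 *	rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, &ns);
 */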
5347
5348 int bnxt_hwrm_cfa_counter_qcaps(struct bnxt *bp, uint16_t *max_fc)
5349 {
5350         int rc = 0;
5351
5352         struct hwrm_cfa_counter_qcaps_input req = {0};
5353         struct hwrm_cfa_counter_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5354
5355         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5356                 PMD_DRV_LOG(DEBUG,
5357                             "Not a PF or trusted VF. Command not supported\n");
5358                 return 0;
5359         }
5360
5361         HWRM_PREP(&req, HWRM_CFA_COUNTER_QCAPS, BNXT_USE_KONG(bp));
5362         req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5363         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5364
5365         HWRM_CHECK_RESULT();
5366         if (max_fc)
5367                 *max_fc = rte_le_to_cpu_16(resp->max_rx_fc);
5368         HWRM_UNLOCK();
5369
5370         return 0;
5371 }
5372
5373 int bnxt_hwrm_ctx_rgtr(struct bnxt *bp, rte_iova_t dma_addr, uint16_t *ctx_id)
5374 {
5375         int rc = 0;
5376         struct hwrm_cfa_ctx_mem_rgtr_input req = {.req_type = 0 };
5377         struct hwrm_cfa_ctx_mem_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
5378
5379         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5380                 PMD_DRV_LOG(DEBUG,
5381                             "Not a PF or trusted VF. Command not supported\n");
5382                 return 0;
5383         }
5384
5385         HWRM_PREP(&req, HWRM_CFA_CTX_MEM_RGTR, BNXT_USE_KONG(bp));
5386
5387         req.page_level = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0;
5388         req.page_size = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_2M;
5389         req.page_dir = rte_cpu_to_le_64(dma_addr);
5390
5391         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5392
5393         HWRM_CHECK_RESULT();
5394         if (ctx_id) {
5395                 *ctx_id  = rte_le_to_cpu_16(resp->ctx_id);
5396                 PMD_DRV_LOG(DEBUG, "ctx_id = %d\n", *ctx_id);
5397         }
5398         HWRM_UNLOCK();
5399
5400         return 0;
5401 }
5402
5403 int bnxt_hwrm_ctx_unrgtr(struct bnxt *bp, uint16_t ctx_id)
5404 {
5405         int rc = 0;
5406         struct hwrm_cfa_ctx_mem_unrgtr_input req = {.req_type = 0 };
5407         struct hwrm_cfa_ctx_mem_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
5408
5409         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5410                 PMD_DRV_LOG(DEBUG,
5411                             "Not a PF or trusted VF. Command not supported\n");
5412                 return 0;
5413         }
5414
5415         HWRM_PREP(&req, HWRM_CFA_CTX_MEM_UNRGTR, BNXT_USE_KONG(bp));
5416
5417         req.ctx_id = rte_cpu_to_le_16(ctx_id);
5418
5419         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5420
5421         HWRM_CHECK_RESULT();
5422         HWRM_UNLOCK();
5423
5424         return rc;
5425 }
5426
5427 int bnxt_hwrm_cfa_counter_cfg(struct bnxt *bp, enum bnxt_flow_dir dir,
5428                               uint16_t cntr, uint16_t ctx_id,
5429                               uint32_t num_entries, bool enable)
5430 {
5431         struct hwrm_cfa_counter_cfg_input req = {0};
5432         struct hwrm_cfa_counter_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5433         uint16_t flags = 0;
5434         int rc;
5435
5436         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5437                 PMD_DRV_LOG(DEBUG,
5438                             "Not a PF or trusted VF. Command not supported\n");
5439                 return 0;
5440         }
5441
5442         HWRM_PREP(&req, HWRM_CFA_COUNTER_CFG, BNXT_USE_KONG(bp));
5443
5444         req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5445         req.counter_type = rte_cpu_to_le_16(cntr);
5446         flags = enable ? HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE :
5447                 HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_DISABLE;
5448         flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL;
5449         if (dir == BNXT_DIR_RX)
5450                 flags |=  HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX;
5451         else if (dir == BNXT_DIR_TX)
5452                 flags |=  HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_TX;
5453         req.flags = rte_cpu_to_le_16(flags);
5454         req.ctx_id =  rte_cpu_to_le_16(ctx_id);
5455         req.num_entries = rte_cpu_to_le_32(num_entries);
5456
5457         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5458         HWRM_CHECK_RESULT();
5459         HWRM_UNLOCK();
5460
5461         return 0;
5462 }
5463
5464 int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp,
5465                                  enum bnxt_flow_dir dir,
5466                                  uint16_t cntr,
5467                                  uint16_t num_entries)
5468 {
5469         struct hwrm_cfa_counter_qstats_output *resp = bp->hwrm_cmd_resp_addr;
5470         struct hwrm_cfa_counter_qstats_input req = {0};
5471         uint16_t flow_ctx_id = 0;
5472         uint16_t flags = 0;
5473         int rc = 0;
5474
5475         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5476                 PMD_DRV_LOG(DEBUG,
5477                             "Not a PF or trusted VF. Command not supported\n");
5478                 return 0;
5479         }
5480
5481         if (dir == BNXT_DIR_RX) {
5482                 flow_ctx_id = bp->flow_stat->rx_fc_in_tbl.ctx_id;
5483                 flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX;
5484         } else if (dir == BNXT_DIR_TX) {
5485                 flow_ctx_id = bp->flow_stat->tx_fc_in_tbl.ctx_id;
5486                 flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_TX;
5487         }
5488
5489         HWRM_PREP(&req, HWRM_CFA_COUNTER_QSTATS, BNXT_USE_KONG(bp));
5490         req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5491         req.counter_type = rte_cpu_to_le_16(cntr);
5492         req.input_flow_ctx_id = rte_cpu_to_le_16(flow_ctx_id);
5493         req.num_entries = rte_cpu_to_le_16(num_entries);
5494         req.flags = rte_cpu_to_le_16(flags);
5495         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5496
5497         HWRM_CHECK_RESULT();
5498         HWRM_UNLOCK();
5499
5500         return 0;
5501 }
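
/*
 * Hedged sketch of the flow-counter bring-up sequence these helpers form
 * together. The counter-type constants are assumed from the HWRM spec,
 * and fc_mem_iova must reference memory laid out as the PAGE_SIZE_2M
 * registration above expects.
 *
 *	uint16_t max_fc = 0, ctx_id = 0;
 *	bnxt_hwrm_cfa_counter_qcaps(bp, &max_fc);
 *	bnxt_hwrm_ctx_rgtr(bp, fc_mem_iova, &ctx_id);
 *	bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
 *		HWRM_CFA_COUNTER_CFG_INPUT_COUNTER_TYPE_FC,
 *		ctx_id, max_fc, true);
 *	bnxt_hwrm_cfa_counter_qstats(bp, BNXT_DIR_RX,
 *		HWRM_CFA_COUNTER_QSTATS_INPUT_COUNTER_TYPE_FC, max_fc);
 */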
5502
5503 int bnxt_hwrm_cfa_vfr_alloc(struct bnxt *bp, uint16_t vf_idx)
5504 {
5505         struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5506         struct hwrm_cfa_vfr_alloc_input req = {0};
5507         int rc;
5508
5509         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5510                 PMD_DRV_LOG(DEBUG,
5511                             "Not a PF or trusted VF. Command not supported\n");
5512                 return 0;
5513         }
5514
5515         HWRM_PREP(&req, HWRM_CFA_VFR_ALLOC, BNXT_USE_CHIMP_MB);
5516         req.vf_id = rte_cpu_to_le_16(vf_idx);
5517         snprintf(req.vfr_name, sizeof(req.vfr_name), "%svfr%d",
5518                  bp->eth_dev->data->name, vf_idx);
5519
5520         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5521         HWRM_CHECK_RESULT();
5522
5523         HWRM_UNLOCK();
5524         PMD_DRV_LOG(DEBUG, "VFR %d allocated\n", vf_idx);
5525         return rc;
5526 }
5527
5528 int bnxt_hwrm_cfa_vfr_free(struct bnxt *bp, uint16_t vf_idx)
5529 {
5530         struct hwrm_cfa_vfr_free_output *resp = bp->hwrm_cmd_resp_addr;
5531         struct hwrm_cfa_vfr_free_input req = {0};
5532         int rc;
5533
5534         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5535                 PMD_DRV_LOG(DEBUG,
5536                             "Not a PF or trusted VF. Command not supported\n");
5537                 return 0;
5538         }
5539
5540         HWRM_PREP(&req, HWRM_CFA_VFR_FREE, BNXT_USE_CHIMP_MB);
5541         req.vf_id = rte_cpu_to_le_16(vf_idx);
5542         snprintf(req.vfr_name, sizeof(req.vfr_name), "%svfr%d",
5543                  bp->eth_dev->data->name, vf_idx);
5544
5545         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5546         HWRM_CHECK_RESULT();
5547         HWRM_UNLOCK();
5548         PMD_DRV_LOG(DEBUG, "VFR %d freed\n", vf_idx);
5549         return rc;
5550 }