/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>
#include <rte_io.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define HWRM_SPEC_CODE_1_8_3            0x10803
#define HWRM_VERSION_1_9_1              0x10901
#define HWRM_VERSION_1_9_2              0x10903

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}
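
/*
 * For example, following the ranges above: page_getenum(3000) returns 12,
 * so page_roundup(3000) returns 1 << 12 = 4096.
 */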

static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
                                  uint8_t *pg_attr,
                                  uint64_t *pg_dir)
{
        if (rmem->nr_pages > 1) {
                *pg_attr = 1;
                *pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
        } else {
                *pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
        }
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return 0 on success, -ETIMEDOUT (-110)
 * if the HWRM command times out, or a negative error code if the FW fails
 * the command.
 */

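/*
 * Write a request into the ChiMP or Kong mailbox and poll for completion.
 * When short-command mode is used (BNXT_FLAG_SHORT_CMD, or a request larger
 * than max_req_len), the full request is staged in a DMA buffer and only a
 * hwrm_short_input descriptor pointing at it is written to the BAR.
 */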
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                  uint32_t msg_len, bool use_kong_mb)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };
        uint16_t bar_offset = use_kong_mb ?
                GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
        uint16_t mb_trigger_offset = use_kong_mb ?
                GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
        uint32_t timeout;

        /* Do not send HWRM commands to firmware in error state */
        if (bp->flags & BNXT_FLAG_FATAL_ERROR)
                return 0;

        timeout = bp->hwrm_cmd_timeout;

        if (bp->flags & BNXT_FLAG_SHORT_CMD ||
            msg_len > bp->max_req_len) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + bar_offset + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
        rte_write32(1, bar);
        /*
         * Make sure the channel doorbell ring command completes before
         * reading the response to avoid getting stale or invalid
         * responses.
         */
        rte_io_mb();

        /* Poll for the valid bit */
        for (i = 0; i < timeout; i++) {
                /* Sanity check on the resp->resp_len */
                rte_cio_rmb();
                if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(1);
        }

        if (i >= timeout) {
                /* Suppress VER_GET timeout messages during reset recovery */
                if (bp->flags & BNXT_FLAG_FW_RESET &&
                    rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
                        return -ETIMEDOUT;

                PMD_DRV_LOG(ERR,
                            "Error(timeout) sending msg 0x%04x, seq_id %d\n",
                            req->req_type, req->seq_id);
                return -ETIMEDOUT;
        }
        return 0;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() converts errors into return codes and may return from
 * the enclosing function on failure; it releases the spinlock only when it
 * returns. If the enclosing function does not use the regular int return
 * codes, HWRM_CHECK_RESULT() should not be used directly; rather it should
 * be copied and modified to suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type, kong) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        if (bp->hwrm_cmd_resp_addr == NULL) { \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return -EACCES; \
        } \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        (req)->req_type = rte_cpu_to_le_16(type); \
        (req)->cmpl_ring = rte_cpu_to_le_16(-1); \
        (req)->seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
                rte_cpu_to_le_16(bp->chimp_cmd_seq++); \
        (req)->target_id = rte_cpu_to_le_16(0xffff); \
        (req)->resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT_SILENT() do {\
        if (rc) { \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_CHECK_RESULT() do {\
        if (rc) { \
                PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
                        rc = -ENOSPC; \
                else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
                        rc = -EINVAL; \
                else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
                        rc = -ENOTSUP; \
                else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
                        rc = -EAGAIN; \
                else if (rc > 0) \
                        rc = -EIO; \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        PMD_DRV_LOG(ERR, \
                                "error %d:%d:%08x:%04x\n", \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        PMD_DRV_LOG(ERR, "error %d\n", rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                        rc = -EACCES; \
                else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
                        rc = -ENOSPC; \
                else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
                        rc = -EINVAL; \
                else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
                        rc = -ENOTSUP; \
                else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
                        rc = -EAGAIN; \
                else if (rc > 0) \
                        rc = -EIO; \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)
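
/*
 * Every bnxt_hwrm_*() wrapper in this file follows the same pattern with
 * the macros above; a minimal sketch (HWRM_SOME_CMD stands in for a real
 * request type):
 *
 *      HWRM_PREP(&req, HWRM_SOME_CMD, BNXT_USE_CHIMP_MB);
 *      req.some_field = rte_cpu_to_le_16(val);
 *      rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 *      HWRM_CHECK_RESULT();
 *      ... read resp fields while the lock is still held ...
 *      HWRM_UNLOCK();
 *      return rc;
 */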

int bnxt_hwrm_tf_message_direct(struct bnxt *bp,
                                bool use_kong_mb,
                                uint16_t msg_type,
                                void *msg,
                                uint32_t msg_len,
                                void *resp_msg,
                                uint32_t resp_len)
{
        int rc = 0;
        bool mailbox = BNXT_USE_CHIMP_MB;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;

        if (use_kong_mb)
                mailbox = BNXT_USE_KONG(bp);

        HWRM_PREP(req, msg_type, mailbox);

        rc = bnxt_hwrm_send_message(bp, req, msg_len, mailbox);

        HWRM_CHECK_RESULT();

        if (resp_msg)
                memcpy(resp_msg, resp, resp_len);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_tf_message_tunneled(struct bnxt *bp,
                                  bool use_kong_mb,
                                  uint16_t tf_type,
                                  uint16_t tf_subtype,
                                  uint32_t *tf_response_code,
                                  void *msg,
                                  uint32_t msg_len,
                                  void *response,
                                  uint32_t response_len)
{
        int rc = 0;
        struct hwrm_cfa_tflib_input req = { .req_type = 0 };
        struct hwrm_cfa_tflib_output *resp = bp->hwrm_cmd_resp_addr;
        bool mailbox = BNXT_USE_CHIMP_MB;

        if (msg_len > sizeof(req.tf_req))
                return -ENOMEM;

        if (use_kong_mb)
                mailbox = BNXT_USE_KONG(bp);

        HWRM_PREP(&req, HWRM_TF, mailbox);
        /* Build request using the user supplied request payload.
         * TLV request size is checked at build time against HWRM
         * request max size, thus no checking required.
         */
        req.tf_type = tf_type;
        req.tf_subtype = tf_subtype;
        memcpy(req.tf_req, msg, msg_len);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), mailbox);
        HWRM_CHECK_RESULT();

        /* Copy the response payload to the caller's buffer. Copy only the
         * 'payload', as the HWRM data structure really is HWRM header +
         * msg header + payload and the TFLIB only provided a payload
         * place holder.
         */
        if (response != NULL && response_len != 0)
                memcpy(response, resp->tf_resp, response_len);

        /* Extract the internal tflib response code */
        *tf_response_code = resp->tf_resp_code;
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

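/*
 * Program the VNIC RX mask from vnic->flags: the broadcast, promiscuous,
 * (all-)multicast and VLAN filtering states are translated into
 * HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_* bits.
 */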
int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;

        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;

        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        } else if (vnic->flags & BNXT_VNIC_INFO_MCAST) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr =
                        rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the set_rx_mask
         * list was used for anti-spoof. In 1.8.0, the TX path configuration was
         * removed from set_rx_mask call, and this command was added.
         *
         * This command is also present from 1.7.8.11 and higher,
         * as well as 1.7.8.0.
         */
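        /*
         * bp->fw_ver packs the FW version as (maj << 24) | (min << 16) |
         * (bld << 8) | rsvd (see bnxt_hwrm_ver_get()), so e.g. 1.7.8.11
         * compares as 0x0107080b below.
         */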
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(&req, HWRM_CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

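/*
 * Release one reference on the FW L2 filter backing the given filter.
 * Filters may share a FW L2 filter via matching_l2_fltr_ptr, so the FW
 * object is only freed once l2_ref_cnt drops to zero.
 */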
int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                             struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct bnxt_filter_info *l2_filter = filter;
        struct bnxt_vnic_info *vnic = NULL;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        if (filter->matching_l2_fltr_ptr)
                l2_filter = filter->matching_l2_fltr_ptr;

        PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
                    filter, l2_filter, l2_filter->l2_ref_cnt);

        if (l2_filter->l2_ref_cnt == 0)
                return 0;

        if (l2_filter->l2_ref_cnt > 0)
                l2_filter->l2_ref_cnt--;

        if (l2_filter->l2_ref_cnt > 0)
                return 0;

        HWRM_PREP(&req, HWRM_CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = UINT64_MAX;
        if (l2_filter->l2_ref_cnt == 0) {
                vnic = l2_filter->vnic;
                if (vnic) {
                        STAILQ_REMOVE(&vnic->filter, l2_filter,
                                      bnxt_filter_info, next);
                        bnxt_free_filter(bp, l2_filter);
                }
        }

        return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        /*
         * TODO: Is there a better way to add VLANs to each VNIC in case of
         * VMDQ?
         */
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                PMD_DRV_LOG(DEBUG,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(&req, HWRM_CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);

        /* PMD does not support XDP and RoCE */
        filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_L2;
        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       RTE_ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;
        if (filter->pri_hint) {
                req.pri_hint = filter->pri_hint;
                req.l2_filter_id_hint =
                        rte_cpu_to_le_64(filter->l2_filter_id_hint);
        }

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
        HWRM_UNLOCK();

        filter->l2_ref_cnt++;

        return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(&req, HWRM_PORT_MAC_CFG, BNXT_USE_CHIMP_MB);

        if (ptp->rx_filter)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |=
                        HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables = rte_cpu_to_le_32
                (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
        HWRM_UNLOCK();

        return rc;
}

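/*
 * Query the PTP configuration from FW. On non-Thor chips the RX/TX
 * timestamp, sequence-id and FIFO register offsets are cached in
 * bp->ptp_cfg for later direct access.
 */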
static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

        if (ptp)
                return 0;

        HWRM_PREP(&req, HWRM_PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);

        req.port_id = rte_cpu_to_le_16(bp->pf->port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (!BNXT_CHIP_THOR(bp) &&
            !(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
                return 0;

        if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
                bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp)
                return -ENOMEM;

        if (!BNXT_CHIP_THOR(bp)) {
                ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
                ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
                ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
                ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
                ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                        rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
                ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
                ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
                ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
                ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                        rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
        }

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        return 0;
}

void bnxt_hwrm_free_vf_info(struct bnxt *bp)
{
        int i;

        for (i = 0; i < bp->pf->max_vfs; i++) {
                rte_free(bp->pf->vf_info[i].vlan_table);
                bp->pf->vf_info[i].vlan_table = NULL;
                rte_free(bp->pf->vf_info[i].vlan_as_table);
                bp->pf->vf_info[i].vlan_as_table = NULL;
        }
        rte_free(bp->pf->vf_info);
        bp->pf->vf_info = NULL;
}

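/*
 * Query function capabilities and cache the reported resource limits
 * (rings, VNICs, stat and L2 contexts) in struct bnxt. On a PF this also
 * (re)allocates the per-VF info table whenever the max_vfs count changes.
 */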
static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        int i;

        HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf->port_id = resp->port_id;
                bp->pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                bp->pf->total_vfs = rte_le_to_cpu_16(resp->max_vfs);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf->max_vfs) {
                        if (bp->pf->vf_info)
                                bnxt_hwrm_free_vf_info(bp);
                        bp->pf->vf_info = rte_zmalloc("bnxt_vf_info",
                            sizeof(bp->pf->vf_info[0]) * new_max_vfs, 0);
                        if (bp->pf->vf_info == NULL) {
                                PMD_DRV_LOG(ERR, "Alloc vf info fail\n");
                                return -ENOMEM;
                        }
                        bp->pf->max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf->vf_info[i].fid =
                                        bp->pf->first_vf_id + i;
                                bp->pf->vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf->vf_info[i].vlan_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Fail to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf->vf_info[i].vlan_table);
                                bp->pf->vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf->vf_info[i].vlan_as_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Alloc VLAN AS table for VF %d fail\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                              bp->pf->vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf->vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        if (!bnxt_check_zero_bytes(resp->mac_address, RTE_ETHER_ADDR_LEN)) {
                bp->flags |= BNXT_FLAG_DFLT_MAC_SET;
                memcpy(bp->mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
        } else {
                bp->flags &= ~BNXT_FLAG_DFLT_MAC_SET;
        }
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
        bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        if (!BNXT_CHIP_THOR(bp))
                bp->max_l2_ctx += bp->max_rx_em_flows;
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf->max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        PMD_DRV_LOG(DEBUG, "Max l2_cntxts is %d vnics is %d\n",
                    bp->max_l2_ctx, bp->max_vnics);
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
                bp->pf->total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
                        HWRM_UNLOCK();
                        bnxt_hwrm_ptp_qcfg(bp);
                }
        }

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
                bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
                bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
                PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
        }

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
                bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;

        if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
                bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc;

        rc = __bnxt_hwrm_func_qcaps(bp);
        if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
                rc = bnxt_alloc_ctx_mem(bp);
                if (rc)
                        return rc;

                rc = bnxt_hwrm_func_resc_qcaps(bp);
                if (!rc)
                        bp->flags |= BNXT_FLAG_NEW_RM;
        }
        /*
         * On older FW, bnxt_hwrm_func_resc_qcaps() can fail and cause an
         * init failure, but the error can safely be ignored, so return
         * success.
         */

        return 0;
}

/* VNIC cap covers capability of all VNICs. So no need to pass vnic_id */
int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_vnic_qcaps_input req = {.req_type = 0 };
        struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(&req, HWRM_VNIC_QCAPS, BNXT_USE_CHIMP_MB);

        req.target_id = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        if (rte_le_to_cpu_32(resp->flags) &
            HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) {
                bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY;
                PMD_DRV_LOG(INFO, "CoS assignment capability enabled\n");
        }

        bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(&req, HWRM_FUNC_RESET, BNXT_USE_CHIMP_MB);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        uint32_t flags = 0;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
                flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
        if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;

        /* PFs and trusted VFs should indicate support for the Master
         * capability on non-Stingray platforms.
         */
        if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;

        HWRM_PREP(&req, HWRM_FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf->vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf->vf_req_fwd)));

                /*
                 * PF can sniff HWRM API issued by VF. This can be set up by
                 * linux driver and inherited by the DPDK PF driver. Clear
                 * this HWRM sniffer list in FW because DPDK PF driver does
                 * not support this.
                 */
                flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE;
        }

        req.flags = rte_cpu_to_le_32(flags);

        req.async_event_fwd[0] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
        if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
                req.async_event_fwd[0] |=
                        rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
        req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
                                 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
        if (BNXT_PF(bp))
                req.async_event_fwd[1] |=
                        rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION);

        if (BNXT_VF_IS_TRUSTED(bp))
                req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT();

        flags = rte_le_to_cpu_32(resp->flags);
        if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
                bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;

        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
{
        if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
                return 0;

        return bnxt_hwrm_func_reserve_vf_resc(bp, true);
}

int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
{
        int rc;
        uint32_t flags = 0;
        uint32_t enables;
        struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_vf_cfg_input req = {0};

        HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);

        enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
                  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;

        if (BNXT_HAS_RING_GRPS(bp)) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
                req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
        }

        req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
        req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
                                            AGG_RING_MULTIPLIER);
        req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
        req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
                                              bp->tx_nr_rings +
                                              BNXT_NUM_ASYNC_CPR(bp));
        req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
        if (bp->vf_resv_strategy ==
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
                           HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
                           HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
                req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
                req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
                req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
        } else if (bp->vf_resv_strategy ==
                   HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MAXIMAL) {
                enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
                req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
        }

        if (test)
                flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
                        HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;

        if (test && BNXT_HAS_RING_GRPS(bp))
                flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;

        req.flags = rte_cpu_to_le_32(flags);
        req.enables |= rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (test)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_resource_qcaps_input req = {0};

        HWRM_PREP(&req, HWRM_FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        HWRM_CHECK_RESULT_SILENT();

        if (BNXT_VF(bp)) {
                bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
                bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
                bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
                bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
                bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
                /* func_resource_qcaps does not return max_rx_em_flows.
                 * So use the value provided by func_qcaps.
                 */
                bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
                if (!BNXT_CHIP_THOR(bp))
                        bp->max_l2_ctx += bp->max_rx_em_flows;
                bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
                bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        }
        bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
        bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
        if (bp->vf_resv_strategy >
            HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
                bp->vf_resv_strategy =
                HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;

        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        bp->hwrm_cmd_timeout = timeout;
        HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

        if (bp->flags & BNXT_FLAG_FW_RESET)
                HWRM_CHECK_RESULT_SILENT();
        else
                HWRM_CHECK_RESULT();

        PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
                resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
                resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
        bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
                     (resp->hwrm_fw_min_8b << 16) |
                     (resp->hwrm_fw_bld_8b << 8) |
                     resp->hwrm_fw_rsvd_8b;
        PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        fw_version = resp->hwrm_intf_maj_8b << 16;
        fw_version |= resp->hwrm_intf_min_8b << 8;
        fw_version |= resp->hwrm_intf_upd_8b;
        bp->hwrm_spec_code = fw_version;

        /* def_req_timeout value is in milliseconds */
        bp->hwrm_cmd_timeout = rte_le_to_cpu_16(resp->def_req_timeout);
        /* convert timeout to usec */
        bp->hwrm_cmd_timeout *= 1000;
        if (!bp->hwrm_cmd_timeout)
                bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;

        if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                PMD_DRV_LOG(ERR, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
        if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
                bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;

        max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT,
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                bp->hwrm_cmd_resp_dma_addr =
                        rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                PMD_DRV_LOG(DEBUG, "Short command supported\n");
                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

        if (((dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
             (dev_caps_cfg &
              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
            bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
                sprintf(type, "bnxt_hwrm_short_" PCI_PRI_FMT,
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr =
                                rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
                bp->flags |= BNXT_FLAG_KONG_MB_EN;
                PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
        }
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
                PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
                bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_MGMT;
                PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
        }

        if (dev_caps_cfg &
            HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED) {
                PMD_DRV_LOG(DEBUG, "FW supports advanced flow counters\n");
                bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_COUNTERS;
        }

1221 error:
1222         HWRM_UNLOCK();
1223         return rc;
1224 }
1225
1226 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
1227 {
1228         int rc;
1229         struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
1230         struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
1231
1232         if (!(bp->flags & BNXT_FLAG_REGISTERED))
1233                 return 0;
1234
1235         HWRM_PREP(&req, HWRM_FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
1236         req.flags = flags;
1237
1238         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1239
1240         HWRM_CHECK_RESULT();
1241         HWRM_UNLOCK();
1242
1243         return rc;
1244 }
1245
1246 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
1247 {
1248         int rc = 0;
1249         struct hwrm_port_phy_cfg_input req = {0};
1250         struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1251         uint32_t enables = 0;
1252
1253         HWRM_PREP(&req, HWRM_PORT_PHY_CFG, BNXT_USE_CHIMP_MB);
1254
1255         if (conf->link_up) {
1256                 /* Setting Fixed Speed. But AutoNeg is ON, So disable it */
1257                 if (bp->link_info->auto_mode && conf->link_speed) {
1258                         req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
1259                         PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
1260                 }
1261
1262                 req.flags = rte_cpu_to_le_32(conf->phy_flags);
1263                 req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
1264                 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
1265                 /*
1266                  * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
1267                  * any auto mode, even "none".
1268                  */
1269                 if (!conf->link_speed) {
1270                         /* No speeds specified. Enable AutoNeg - all speeds */
1271                         req.auto_mode =
1272                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
1273                 }
1274                 /* AutoNeg - Advertise speeds specified. */
1275                 if (conf->auto_link_speed_mask &&
1276                     !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
1277                         req.auto_mode =
1278                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
1279                         req.auto_link_speed_mask =
1280                                 conf->auto_link_speed_mask;
1281                         enables |=
1282                         HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
1283                 }
1284
1285                 req.auto_duplex = conf->duplex;
1286                 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
1287                 req.auto_pause = conf->auto_pause;
1288                 req.force_pause = conf->force_pause;
1289                 /* Set force_pause if there is no auto or if there is a force */
1290                 if (req.auto_pause && !req.force_pause)
1291                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
1292                 else
1293                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
1294
1295                 req.enables = rte_cpu_to_le_32(enables);
1296         } else {
1297                 req.flags =
1298                 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
1299                 PMD_DRV_LOG(INFO, "Force Link Down\n");
1300         }
1301
1302         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1303
1304         HWRM_CHECK_RESULT();
1305         HWRM_UNLOCK();
1306
1307         return rc;
1308 }
1309
1310 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
1311                                    struct bnxt_link_info *link_info)
1312 {
1313         int rc = 0;
1314         struct hwrm_port_phy_qcfg_input req = {0};
1315         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1316
1317         HWRM_PREP(&req, HWRM_PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);
1318
1319         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1320
1321         HWRM_CHECK_RESULT();
1322
1323         link_info->phy_link_status = resp->link;
1324         link_info->link_up =
1325                 (link_info->phy_link_status ==
1326                  HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
1327         link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
1328         link_info->duplex = resp->duplex_cfg;
1329         link_info->pause = resp->pause;
1330         link_info->auto_pause = resp->auto_pause;
1331         link_info->force_pause = resp->force_pause;
1332         link_info->auto_mode = resp->auto_mode;
1333         link_info->phy_type = resp->phy_type;
1334         link_info->media_type = resp->media_type;
1335
1336         link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
1337         link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
1338         link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
1339         link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
1340         link_info->phy_ver[0] = resp->phy_maj;
1341         link_info->phy_ver[1] = resp->phy_min;
1342         link_info->phy_ver[2] = resp->phy_bld;
1343
1344         HWRM_UNLOCK();
1345
1346         PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
1347         PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
1348         PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
1349         PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
1350         PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
1351                     link_info->auto_link_speed_mask);
1352         PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
1353                     link_info->force_link_speed);
1354
1355         return rc;
1356 }
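
/*
 * Example (hypothetical helper, illustration only): translating the
 * snapshot captured above into the generic rte_eth_link form.  HWRM
 * reports link_speed in 100 Mbps units; the real driver decodes each
 * speed code individually.
 *
 *	static void example_link_to_eth(struct bnxt_link_info *li,
 *					struct rte_eth_link *link)
 *	{
 *		link->link_status = li->link_up ?
 *			ETH_LINK_UP : ETH_LINK_DOWN;
 *		if (li->duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_HALF)
 *			link->link_duplex = ETH_LINK_HALF_DUPLEX;
 *		else
 *			link->link_duplex = ETH_LINK_FULL_DUPLEX;
 *		link->link_speed = li->link_speed * 100;
 *	}
 */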
1357
1358 int bnxt_hwrm_port_phy_qcaps(struct bnxt *bp)
1359 {
1360         int rc = 0;
1361         struct hwrm_port_phy_qcaps_input req = {0};
1362         struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1363
1364         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
1365                 return 0;
1366
1367         HWRM_PREP(&req, HWRM_PORT_PHY_QCAPS, BNXT_USE_CHIMP_MB);
1368
1369         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1370
1371         HWRM_CHECK_RESULT();
1372
1373         bp->port_cnt = resp->port_cnt;
1374
1375         HWRM_UNLOCK();
1376
1377         return 0;
1378 }
1379
1380 static bool bnxt_find_lossy_profile(struct bnxt *bp)
1381 {
1382         int i = 0;
1383
1384         for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
1385                 if (bp->tx_cos_queue[i].profile ==
1386                     HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
1387                         bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
1388                         return true;
1389                 }
1390         }
1391         return false;
1392 }
1393
1394 static void bnxt_find_first_valid_profile(struct bnxt *bp)
1395 {
1396         int i = 0;
1397
1398         for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
1399                 if (bp->tx_cos_queue[i].profile !=
1400                     HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN &&
1401                     bp->tx_cos_queue[i].id !=
1402                     HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN) {
1403                         bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
1404                         break;
1405                 }
1406         }
1407 }
1408
1409 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
1410 {
1411         int rc = 0;
1412         struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
1413         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
1414         uint32_t dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
1415         int i;
1416
1417 get_rx_info:
1418         HWRM_PREP(&req, HWRM_QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
1419
1420         req.flags = rte_cpu_to_le_32(dir);
1421         /* drv_qmap_cap requires HWRM 1.9.1+ and no CoS classification */
1422         if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1 &&
1423             !(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
1424                 req.drv_qmap_cap =
1425                         HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
1426         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1427
1428         HWRM_CHECK_RESULT();
1429
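        /*
         * GET_TX_QUEUE_INFO()/GET_RX_QUEUE_INFO() are helper macros
         * defined elsewhere in this file.  They expand roughly to the
         * following (TX shown; a sketch assuming the HSI response
         * field naming):
         *
         *	bp->tx_cos_queue[x].id = resp->queue_id##x;
         *	bp->tx_cos_queue[x].profile =
         *		resp->queue_id##x##_service_profile;
         */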
1430         if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
1431                 GET_TX_QUEUE_INFO(0);
1432                 GET_TX_QUEUE_INFO(1);
1433                 GET_TX_QUEUE_INFO(2);
1434                 GET_TX_QUEUE_INFO(3);
1435                 GET_TX_QUEUE_INFO(4);
1436                 GET_TX_QUEUE_INFO(5);
1437                 GET_TX_QUEUE_INFO(6);
1438                 GET_TX_QUEUE_INFO(7);
1439         } else {
1440                 GET_RX_QUEUE_INFO(0);
1441                 GET_RX_QUEUE_INFO(1);
1442                 GET_RX_QUEUE_INFO(2);
1443                 GET_RX_QUEUE_INFO(3);
1444                 GET_RX_QUEUE_INFO(4);
1445                 GET_RX_QUEUE_INFO(5);
1446                 GET_RX_QUEUE_INFO(6);
1447                 GET_RX_QUEUE_INFO(7);
1448         }
1449
1450         HWRM_UNLOCK();
1451
1452         if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX)
1453                 goto done;
1454
1455         if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
1456                 bp->tx_cosq_id[0] = bp->tx_cos_queue[0].id;
1457         } else {
1458                 int j;
1459
1460                 /* iterate and find the COSq profile to use for Tx */
1461                 if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
1462                         for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
1463                                 if (bp->tx_cos_queue[i].id != 0xff)
1464                                         bp->tx_cosq_id[j++] =
1465                                                 bp->tx_cos_queue[i].id;
1466                         }
1467                 } else {
1468                         /* When CoS classification is disabled, prefer the
1469                          * LOSSY profile for normal NIC operation; if none
1470                          * is found, fall back to the first valid profile.
1471                          */
1472                         if (!bnxt_find_lossy_profile(bp))
1473                                 bnxt_find_first_valid_profile(bp);
1474
1475                 }
1476         }
1477
1478         bp->max_tc = resp->max_configurable_queues;
1479         bp->max_lltc = resp->max_configurable_lossless_queues;
1480         if (bp->max_tc > BNXT_MAX_QUEUE)
1481                 bp->max_tc = BNXT_MAX_QUEUE;
1482         bp->max_q = bp->max_tc;
1483
1484         if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
1485                 dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX;
1486                 goto get_rx_info;
1487         }
1488
1489 done:
1490         return rc;
1491 }
1492
1493 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
1494                          struct bnxt_ring *ring,
1495                          uint32_t ring_type, uint32_t map_index,
1496                          uint32_t stats_ctx_id, uint32_t cmpl_ring_id,
1497                          uint16_t tx_cosq_id)
1498 {
1499         int rc = 0;
1500         uint32_t enables = 0;
1501         struct hwrm_ring_alloc_input req = {.req_type = 0 };
1502         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1503         struct rte_mempool *mb_pool;
1504         uint16_t rx_buf_size;
1505
1506         HWRM_PREP(&req, HWRM_RING_ALLOC, BNXT_USE_CHIMP_MB);
1507
1508         req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
1509         req.fbo = rte_cpu_to_le_32(0);
1510         /* Association of ring index with doorbell index */
1511         req.logical_id = rte_cpu_to_le_16(map_index);
1512         req.length = rte_cpu_to_le_32(ring->ring_size);
1513
1514         switch (ring_type) {
1515         case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1516                 req.ring_type = ring_type;
1517                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1518                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1519                 req.queue_id = rte_cpu_to_le_16(tx_cosq_id);
1520                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1521                         enables |=
1522                         HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1523                 break;
1524         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1525                 req.ring_type = ring_type;
1526                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1527                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1528                 if (BNXT_CHIP_THOR(bp)) {
1529                         mb_pool = bp->rx_queues[0]->mb_pool;
1530                         rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1531                                       RTE_PKTMBUF_HEADROOM;
1532                         rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1533                         req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1534                         enables |=
1535                                 HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
1536                 }
1537                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1538                         enables |=
1539                                 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1540                 break;
1541         case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1542                 req.ring_type = ring_type;
1543                 if (BNXT_HAS_NQ(bp)) {
1544                         /* Association of cp ring with nq */
1545                         req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1546                         enables |=
1547                                 HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
1548                 }
1549                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1550                 break;
1551         case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1552                 req.ring_type = ring_type;
1553                 req.page_size = BNXT_PAGE_SHFT;
1554                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1555                 break;
1556         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1557                 req.ring_type = ring_type;
1558                 req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);
1559
1560                 mb_pool = bp->rx_queues[0]->mb_pool;
1561                 rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1562                               RTE_PKTMBUF_HEADROOM;
1563                 rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1564                 req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1565
1566                 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1567                 enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
1568                            HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
1569                            HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1570                 break;
1571         default:
1572                 PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
1573                         ring_type);
1574                 HWRM_UNLOCK();
1575                 return -EINVAL;
1576         }
1577         req.enables = rte_cpu_to_le_32(enables);
1578
1579         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1580
1581         if (rc || resp->error_code) {
1582                 if (rc == 0 && resp->error_code)
1583                         rc = rte_le_to_cpu_16(resp->error_code);
1584                 switch (ring_type) {
1585                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1586                         PMD_DRV_LOG(ERR,
1587                                 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
1588                         HWRM_UNLOCK();
1589                         return rc;
1590                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1591                         PMD_DRV_LOG(ERR,
1592                                     "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1593                         HWRM_UNLOCK();
1594                         return rc;
1595                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1596                         PMD_DRV_LOG(ERR,
1597                                     "hwrm_ring_alloc rx agg failed. rc:%d\n",
1598                                     rc);
1599                         HWRM_UNLOCK();
1600                         return rc;
1601                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1602                         PMD_DRV_LOG(ERR,
1603                                     "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1604                         HWRM_UNLOCK();
1605                         return rc;
1606                 case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1607                         PMD_DRV_LOG(ERR,
1608                                     "hwrm_ring_alloc nq failed. rc:%d\n", rc);
1609                         HWRM_UNLOCK();
1610                         return rc;
1611                 default:
1612                         PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1613                         HWRM_UNLOCK();
1614                         return rc;
1615                 }
1616         }
1617
1618         ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1619         HWRM_UNLOCK();
1620         return rc;
1621 }
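
/*
 * Example (illustrative only; variable names are hypothetical): the
 * allocation order used by the ring setup path.  A completion ring is
 * created first so its firmware ID can be passed as cmpl_ring_id when
 * the TX ring is allocated.
 *
 *	rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
 *				  HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
 *				  queue_index, HWRM_NA_SIGNATURE,
 *				  nq_ring_id, 0);
 *	if (!rc)
 *		rc = bnxt_hwrm_ring_alloc(bp, tx_ring,
 *					  HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
 *					  queue_index, cpr->hw_stats_ctx_id,
 *					  cp_ring->fw_ring_id,
 *					  bp->tx_cosq_id[0]);
 */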
1622
1623 int bnxt_hwrm_ring_free(struct bnxt *bp,
1624                         struct bnxt_ring *ring, uint32_t ring_type)
1625 {
1626         int rc;
1627         struct hwrm_ring_free_input req = {.req_type = 0 };
1628         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1629
1630         HWRM_PREP(&req, HWRM_RING_FREE, BNXT_USE_CHIMP_MB);
1631
1632         req.ring_type = ring_type;
1633         req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1634
1635         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1636
1637         if (rc || resp->error_code) {
1638                 if (rc == 0 && resp->error_code)
1639                         rc = rte_le_to_cpu_16(resp->error_code);
1640                 HWRM_UNLOCK();
1641
1642                 switch (ring_type) {
1643                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1644                         PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1645                                 rc);
1646                         return rc;
1647                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1648                         PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1649                                 rc);
1650                         return rc;
1651                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1652                         PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1653                                 rc);
1654                         return rc;
1655                 case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
1656                         PMD_DRV_LOG(ERR,
1657                                     "hwrm_ring_free nq failed. rc:%d\n", rc);
1658                         return rc;
1659                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
1660                         PMD_DRV_LOG(ERR,
1661                                     "hwrm_ring_free agg failed. rc:%d\n", rc);
1662                         return rc;
1663                 default:
1664                         PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1665                         return rc;
1666                 }
1667         }
1668         HWRM_UNLOCK();
1669         return 0;
1670 }
1671
1672 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1673 {
1674         int rc = 0;
1675         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1676         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1677
1678         HWRM_PREP(&req, HWRM_RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
1679
1680         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1681         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1682         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1683         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1684
1685         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1686
1687         HWRM_CHECK_RESULT();
1688
1689         bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id);
1690
1691         HWRM_UNLOCK();
1692
1693         return rc;
1694 }
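
/*
 * Example (illustrative only; the ring variables are hypothetical):
 * the per-queue firmware IDs consumed above must already be recorded
 * in bp->grp_info[] by the ring allocation path:
 *
 *	bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id;
 *	bp->grp_info[idx].rx_fw_ring_id = rx_ring->fw_ring_id;
 *	bp->grp_info[idx].ag_fw_ring_id = ag_ring->fw_ring_id;
 *	bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
 *	rc = bnxt_hwrm_ring_grp_alloc(bp, idx);
 */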
1695
1696 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1697 {
1698         int rc;
1699         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1700         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1701
1702         HWRM_PREP(&req, HWRM_RING_GRP_FREE, BNXT_USE_CHIMP_MB);
1703
1704         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1705
1706         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1707
1708         HWRM_CHECK_RESULT();
1709         HWRM_UNLOCK();
1710
1711         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1712         return rc;
1713 }
1714
1715 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1716 {
1717         int rc = 0;
1718         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1719         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1720
1721         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1722                 return rc;
1723
1724         HWRM_PREP(&req, HWRM_STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
1725
1726         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1727
1728         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1729
1730         HWRM_CHECK_RESULT();
1731         HWRM_UNLOCK();
1732
1733         return rc;
1734 }
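
/*
 * Most helpers in this file follow the call skeleton visible in
 * bnxt_hwrm_stat_clear() above (sketch; the macros are defined in
 * bnxt_hwrm.h, with HWRM_PREP taking the per-device hwrm_lock and
 * HWRM_CHECK_RESULT()/HWRM_UNLOCK() handling errors and unlocking):
 *
 *	HWRM_PREP(&req, HWRM_<CMD>, BNXT_USE_CHIMP_MB);
 *	... fill little-endian request fields ...
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
 *				    BNXT_USE_CHIMP_MB);
 *	HWRM_CHECK_RESULT();
 *	... read response fields while the lock is still held ...
 *	HWRM_UNLOCK();
 */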
1735
1736 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1737                                 unsigned int idx __rte_unused)
1738 {
1739         int rc;
1740         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1741         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1742
1743         HWRM_PREP(&req, HWRM_STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1744
1745         req.update_period_ms = rte_cpu_to_le_32(0);
1746
1747         req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map);
1748
1749         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1750
1751         HWRM_CHECK_RESULT();
1752
1753         cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);
1754
1755         HWRM_UNLOCK();
1756
1757         return rc;
1758 }
1759
1760 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1761                                 unsigned int idx __rte_unused)
1762 {
1763         int rc;
1764         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1765         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1766
1767         HWRM_PREP(&req, HWRM_STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
1768
1769         req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1770
1771         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1772
1773         HWRM_CHECK_RESULT();
1774         HWRM_UNLOCK();
1775
1776         return rc;
1777 }
1778
1779 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1780 {
1781         int rc = 0, i, j;
1782         struct hwrm_vnic_alloc_input req = { 0 };
1783         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1784
1785         if (!BNXT_HAS_RING_GRPS(bp))
1786                 goto skip_ring_grps;
1787
1788         /* map ring groups to this vnic */
1789         PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1790                 vnic->start_grp_id, vnic->end_grp_id);
1791         for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
1792                 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1793
1794         vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1795         vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1796         vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1797         vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1798
1799 skip_ring_grps:
1800         vnic->mru = BNXT_VNIC_MRU(bp->eth_dev->data->mtu);
1801         HWRM_PREP(&req, HWRM_VNIC_ALLOC, BNXT_USE_CHIMP_MB);
1802
1803         if (vnic->func_default)
1804                 req.flags =
1805                         rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1806         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1807
1808         HWRM_CHECK_RESULT();
1809
1810         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1811         HWRM_UNLOCK();
1812         PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1813         return rc;
1814 }
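
/*
 * Example (illustrative sketch; error handling elided): a plausible
 * VNIC bring-up and teardown built from the helpers in this file,
 * ordered to respect the dependencies above (context before cfg,
 * RSS after cfg).  The real start path adds filter and TPA setup.
 *
 *	rc = bnxt_hwrm_vnic_alloc(bp, vnic);
 *	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
 *	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
 *	rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
 *	...
 *	bnxt_hwrm_vnic_ctx_free(bp, vnic);
 *	bnxt_hwrm_vnic_free(bp, vnic);
 */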
1815
1816 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1817                                         struct bnxt_vnic_info *vnic,
1818                                         struct bnxt_plcmodes_cfg *pmode)
1819 {
1820         int rc = 0;
1821         struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1822         struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1823
1824         HWRM_PREP(&req, HWRM_VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
1825
1826         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1827
1828         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1829
1830         HWRM_CHECK_RESULT();
1831
1832         pmode->flags = rte_le_to_cpu_32(resp->flags);
1833         /* dflt_vnic bit doesn't exist in the _cfg command */
1834         pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1835         pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1836         pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1837         pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1838
1839         HWRM_UNLOCK();
1840
1841         return rc;
1842 }
1843
1844 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1845                                        struct bnxt_vnic_info *vnic,
1846                                        struct bnxt_plcmodes_cfg *pmode)
1847 {
1848         int rc = 0;
1849         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1850         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1851
1852         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1853                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1854                 return rc;
1855         }
1856
1857         HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1858
1859         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1860         req.flags = rte_cpu_to_le_32(pmode->flags);
1861         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1862         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1863         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1864         req.enables = rte_cpu_to_le_32(
1865             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1866             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1867             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1868         );
1869
1870         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1871
1872         HWRM_CHECK_RESULT();
1873         HWRM_UNLOCK();
1874
1875         return rc;
1876 }
1877
1878 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1879 {
1880         int rc = 0;
1881         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1882         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1883         struct bnxt_plcmodes_cfg pmodes = { 0 };
1884         uint32_t ctx_enable_flag = 0;
1885         uint32_t enables = 0;
1886
1887         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1888                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1889                 return rc;
1890         }
1891
1892         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1893         if (rc)
1894                 return rc;
1895
1896         HWRM_PREP(&req, HWRM_VNIC_CFG, BNXT_USE_CHIMP_MB);
1897
1898         if (BNXT_CHIP_THOR(bp)) {
1899                 int dflt_rxq = vnic->start_grp_id;
1900                 struct bnxt_rx_ring_info *rxr;
1901                 struct bnxt_cp_ring_info *cpr;
1902                 struct bnxt_rx_queue *rxq;
1903                 int i;
1904
1905                 /*
1906                  * The first active receive ring is used as the VNIC
1907                  * default receive ring. If there are no active receive
1908                  * rings (all corresponding receive queues are stopped),
1909                  * the first receive ring is used.
1910                  */
1911                 for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
1912                         rxq = bp->eth_dev->data->rx_queues[i];
1913                         if (rxq->rx_started) {
1914                                 dflt_rxq = i;
1915                                 break;
1916                         }
1917                 }
1918
1919                 rxq = bp->eth_dev->data->rx_queues[dflt_rxq];
1920                 rxr = rxq->rx_ring;
1921                 cpr = rxq->cp_ring;
1922
1923                 req.default_rx_ring_id =
1924                         rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
1925                 req.default_cmpl_ring_id =
1926                         rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
1927                 enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
1928                           HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
1929                 goto config_mru;
1930         }
1931
1932         /* Only RSS is supported for now; CoS and LB rules are TBD. */
1933         enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
1934         if (vnic->lb_rule != (uint16_t)HWRM_NA_SIGNATURE)
1935                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1936         if (vnic->cos_rule != (uint16_t)HWRM_NA_SIGNATURE)
1937                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1938         if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
1939                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1940                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1941         }
1942         if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
1943                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID;
1944                 req.queue_id = rte_cpu_to_le_16(vnic->cos_queue_id);
1945         }
1946
1947         enables |= ctx_enable_flag;
1948         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1949         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1950         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1951         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1952
1953 config_mru:
1954         req.enables = rte_cpu_to_le_32(enables);
1955         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1956         req.mru = rte_cpu_to_le_16(vnic->mru);
1957         /* Configure default VNIC only once. */
1958         if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
1959                 req.flags |=
1960                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1961                 bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
1962         }
1963         if (vnic->vlan_strip)
1964                 req.flags |=
1965                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1966         if (vnic->bd_stall)
1967                 req.flags |=
1968                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1969         if (vnic->roce_dual)
1970                 req.flags |= rte_cpu_to_le_32(
1971                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1972         if (vnic->roce_only)
1973                 req.flags |= rte_cpu_to_le_32(
1974                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1975         if (vnic->rss_dflt_cr)
1976                 req.flags |= rte_cpu_to_le_32(
1977                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1978
1979         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1980
1981         HWRM_CHECK_RESULT();
1982         HWRM_UNLOCK();
1983
1984         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1985
1986         return rc;
1987 }
1988
1989 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1990                 int16_t fw_vf_id)
1991 {
1992         int rc = 0;
1993         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1994         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1995
1996         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1997                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1998                 return rc;
1999         }
2000         HWRM_PREP(&req, HWRM_VNIC_QCFG, BNXT_USE_CHIMP_MB);
2001
2002         req.enables =
2003                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
2004         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2005         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
2006
2007         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2008
2009         HWRM_CHECK_RESULT();
2010
2011         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
2012         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
2013         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
2014         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
2015         vnic->mru = rte_le_to_cpu_16(resp->mru);
2016         vnic->func_default = rte_le_to_cpu_32(
2017                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
2018         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
2019                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
2020         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
2021                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
2022         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
2023                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
2024         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
2025                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
2026         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
2027                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
2028
2029         HWRM_UNLOCK();
2030
2031         return rc;
2032 }
2033
2034 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
2035                              struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
2036 {
2037         int rc = 0;
2038         uint16_t ctx_id;
2039         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
2040         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
2041                                                 bp->hwrm_cmd_resp_addr;
2042
2043         HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
2044
2045         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2046         HWRM_CHECK_RESULT();
2047
2048         ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
2049         if (!BNXT_HAS_RING_GRPS(bp))
2050                 vnic->fw_grp_ids[ctx_idx] = ctx_id;
2051         else if (ctx_idx == 0)
2052                 vnic->rss_rule = ctx_id;
2053
2054         HWRM_UNLOCK();
2055
2056         return rc;
2057 }
2058
2059 static
2060 int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
2061                              struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
2062 {
2063         int rc = 0;
2064         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
2065         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
2066                                                 bp->hwrm_cmd_resp_addr;
2067
2068         if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
2069                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
2070                 return rc;
2071         }
2072         HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
2073
2074         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
2075
2076         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2077
2078         HWRM_CHECK_RESULT();
2079         HWRM_UNLOCK();
2080
2081         return rc;
2082 }
2083
2084 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2085 {
2086         int rc = 0;
2087
2088         if (BNXT_CHIP_THOR(bp)) {
2089                 int j;
2090
2091                 for (j = 0; j < vnic->num_lb_ctxts; j++) {
2092                         rc = _bnxt_hwrm_vnic_ctx_free(bp,
2093                                                       vnic,
2094                                                       vnic->fw_grp_ids[j]);
2095                         vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
2096                 }
2097                 vnic->num_lb_ctxts = 0;
2098         } else {
2099                 rc = _bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
2100                 vnic->rss_rule = INVALID_HW_RING_ID;
2101         }
2102
2103         return rc;
2104 }
2105
2106 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2107 {
2108         int rc = 0;
2109         struct hwrm_vnic_free_input req = {.req_type = 0 };
2110         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
2111
2112         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2113                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
2114                 return rc;
2115         }
2116
2117         HWRM_PREP(&req, HWRM_VNIC_FREE, BNXT_USE_CHIMP_MB);
2118
2119         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2120
2121         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2122
2123         HWRM_CHECK_RESULT();
2124         HWRM_UNLOCK();
2125
2126         vnic->fw_vnic_id = INVALID_HW_RING_ID;
2127         /* Configure default VNIC again if necessary. */
2128         if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
2129                 bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;
2130
2131         return rc;
2132 }
2133
2134 static int
2135 bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2136 {
2137         int i;
2138         int rc = 0;
2139         int nr_ctxs = vnic->num_lb_ctxts;
2140         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2141         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2142
2143         for (i = 0; i < nr_ctxs; i++) {
2144                 HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2145
2146                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2147                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
2148                 req.hash_mode_flags = vnic->hash_mode;
2149
2150                 req.hash_key_tbl_addr =
2151                         rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2152
2153                 req.ring_grp_tbl_addr =
2154                         rte_cpu_to_le_64(vnic->rss_table_dma_addr +
2155                                          i * HW_HASH_INDEX_SIZE);
2156                 req.ring_table_pair_index = i;
2157                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
2158
2159                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
2160                                             BNXT_USE_CHIMP_MB);
2161
2162                 HWRM_CHECK_RESULT();
2163                 HWRM_UNLOCK();
2164         }
2165
2166         return rc;
2167 }
2168
2169 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
2170                            struct bnxt_vnic_info *vnic)
2171 {
2172         int rc = 0;
2173         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2174         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2175
2176         if (!vnic->rss_table)
2177                 return 0;
2178
2179         if (BNXT_CHIP_THOR(bp))
2180                 return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);
2181
2182         HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2183
2184         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
2185         req.hash_mode_flags = vnic->hash_mode;
2186
2187         req.ring_grp_tbl_addr =
2188             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
2189         req.hash_key_tbl_addr =
2190             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2191         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
2192         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2193
2194         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2195
2196         HWRM_CHECK_RESULT();
2197         HWRM_UNLOCK();
2198
2199         return rc;
2200 }
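
/*
 * Example (illustrative only; dev_conf is hypothetical): deriving
 * vnic->hash_type from a generic rte_eth RSS request before calling
 * bnxt_hwrm_vnic_rss_cfg().  Only the IPv4 cases are shown.
 *
 *	uint64_t rss_hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
 *
 *	vnic->hash_type = 0;
 *	if (rss_hf & ETH_RSS_IPV4)
 *		vnic->hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
 *	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
 *		vnic->hash_type |=
 *			HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
 *	rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
 */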
2201
2202 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
2203                         struct bnxt_vnic_info *vnic)
2204 {
2205         int rc = 0;
2206         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
2207         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2208         uint16_t size;
2209
2210         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2211                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
2212                 return rc;
2213         }
2214
2215         HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
2216
2217         req.flags = rte_cpu_to_le_32(
2218                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
2219
2220         req.enables = rte_cpu_to_le_32(
2221                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
2222
2223         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
2224         size -= RTE_PKTMBUF_HEADROOM;
2225         size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
2226
2227         req.jumbo_thresh = rte_cpu_to_le_16(size);
2228         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2229
2230         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2231
2232         HWRM_CHECK_RESULT();
2233         HWRM_UNLOCK();
2234
2235         return rc;
2236 }
2237
2238 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
2239                         struct bnxt_vnic_info *vnic, bool enable)
2240 {
2241         int rc = 0;
2242         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
2243         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2244
2245         if (BNXT_CHIP_THOR(bp) && !bp->max_tpa_v2) {
2246                 if (enable)
2247                         PMD_DRV_LOG(ERR, "No HW support for LRO\n");
2248                 return -ENOTSUP;
2249         }
2250
2251         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2252                 PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
2253                 return 0;
2254         }
2255
2256         HWRM_PREP(&req, HWRM_VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
2257
2258         if (enable) {
2259                 req.enables = rte_cpu_to_le_32(
2260                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
2261                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
2262                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
2263                 req.flags = rte_cpu_to_le_32(
2264                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
2265                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
2266                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
2267                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
2268                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
2269                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
2270                 req.max_aggs = rte_cpu_to_le_16(BNXT_TPA_MAX_AGGS(bp));
2271                 req.max_agg_segs = rte_cpu_to_le_16(BNXT_TPA_MAX_SEGS(bp));
2272                 req.min_agg_len = rte_cpu_to_le_32(512);
2273         }
2274         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2275
2276         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2277
2278         HWRM_CHECK_RESULT();
2279         HWRM_UNLOCK();
2280
2281         return rc;
2282 }
2283
2284 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
2285 {
2286         struct hwrm_func_cfg_input req = {0};
2287         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2288         int rc;
2289
2290         req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
2291         req.enables = rte_cpu_to_le_32(
2292                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2293         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
2294         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
2295
2296         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
2297
2298         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2299         HWRM_CHECK_RESULT();
2300         HWRM_UNLOCK();
2301
2302         bp->pf->vf_info[vf].random_mac = false;
2303
2304         return rc;
2305 }
2306
2307 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
2308                                   uint64_t *dropped)
2309 {
2310         int rc = 0;
2311         struct hwrm_func_qstats_input req = {.req_type = 0};
2312         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2313
2314         HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2315
2316         req.fid = rte_cpu_to_le_16(fid);
2317
2318         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2319
2320         HWRM_CHECK_RESULT();
2321
2322         if (dropped)
2323                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
2324
2325         HWRM_UNLOCK();
2326
2327         return rc;
2328 }
2329
2330 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
2331                           struct rte_eth_stats *stats,
2332                           struct hwrm_func_qstats_output *func_qstats)
2333 {
2334         int rc = 0;
2335         struct hwrm_func_qstats_input req = {.req_type = 0};
2336         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2337
2338         HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2339
2340         req.fid = rte_cpu_to_le_16(fid);
2341
2342         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2343
2344         HWRM_CHECK_RESULT();
2345         if (func_qstats)
2346                 memcpy(func_qstats, resp,
2347                        sizeof(struct hwrm_func_qstats_output));
2348
2349         if (!stats)
2350                 goto exit;
2351
2352         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2353         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2354         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2355         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2356         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2357         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2358
2359         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2360         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2361         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2362         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2363         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2364         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2365
2366         stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
2367         stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
2368         stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
2369
2370 exit:
2371         HWRM_UNLOCK();
2372
2373         return rc;
2374 }
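
/*
 * Example (illustrative only): filling rte_eth_stats for the calling
 * function.  Using fid 0xffff to address the caller's own function is
 * an assumption based on HWRM convention.
 *
 *	struct rte_eth_stats stats = { 0 };
 *
 *	rc = bnxt_hwrm_func_qstats(bp, 0xffff, &stats, NULL);
 */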
2375
2376 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
2377 {
2378         int rc = 0;
2379         struct hwrm_func_clr_stats_input req = {.req_type = 0};
2380         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2381
2382         HWRM_PREP(&req, HWRM_FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
2383
2384         req.fid = rte_cpu_to_le_16(fid);
2385
2386         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2387
2388         HWRM_CHECK_RESULT();
2389         HWRM_UNLOCK();
2390
2391         return rc;
2392 }
2393
2394 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
2395 {
2396         unsigned int i;
2397         int rc = 0;
2398
2399         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2400                 struct bnxt_tx_queue *txq;
2401                 struct bnxt_rx_queue *rxq;
2402                 struct bnxt_cp_ring_info *cpr;
2403
2404                 if (i >= bp->rx_cp_nr_rings) {
2405                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2406                         cpr = txq->cp_ring;
2407                 } else {
2408                         rxq = bp->rx_queues[i];
2409                         cpr = rxq->cp_ring;
2410                 }
2411
2412                 rc = bnxt_hwrm_stat_clear(bp, cpr);
2413                 if (rc)
2414                         return rc;
2415         }
2416         return 0;
2417 }
2418
2419 static int
2420 bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
2421 {
2422         int rc;
2423         unsigned int i;
2424         struct bnxt_cp_ring_info *cpr;
2425
2426         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2427
2428                 if (i >= bp->rx_cp_nr_rings) {
2429                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
2430                 } else {
2431                         cpr = bp->rx_queues[i]->cp_ring;
2432                         if (BNXT_HAS_RING_GRPS(bp))
2433                                 bp->grp_info[i].fw_stats_ctx = -1;
2434                 }
2435                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
2436                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
2437                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
2438                         if (rc)
2439                                 return rc;
2440                 }
2441         }
2442         return 0;
2443 }
2444
2445 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
2446 {
2447         unsigned int i;
2448         int rc = 0;
2449
2450         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2451                 struct bnxt_tx_queue *txq;
2452                 struct bnxt_rx_queue *rxq;
2453                 struct bnxt_cp_ring_info *cpr;
2454
2455                 if (i >= bp->rx_cp_nr_rings) {
2456                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2457                         cpr = txq->cp_ring;
2458                 } else {
2459                         rxq = bp->rx_queues[i];
2460                         cpr = rxq->cp_ring;
2461                 }
2462
2463                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
2464
2465                 if (rc)
2466                         return rc;
2467         }
2468         return rc;
2469 }
2470
2471 static int
2472 bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
2473 {
2474         uint16_t idx;
2475         int rc = 0;
2476
2477         if (!BNXT_HAS_RING_GRPS(bp))
2478                 return 0;
2479
2480         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
2481
2482                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
2483                         continue;
2484
2485                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
2486
2487                 if (rc)
2488                         return rc;
2489         }
2490         return rc;
2491 }
2492
2493 void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2494 {
2495         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2496
2497         bnxt_hwrm_ring_free(bp, cp_ring,
2498                             HWRM_RING_FREE_INPUT_RING_TYPE_NQ);
2499         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2500         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2501                                      sizeof(*cpr->cp_desc_ring));
2502         cpr->cp_raw_cons = 0;
2503         cpr->valid = 0;
2504 }
2505
2506 void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2507 {
2508         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2509
2510         bnxt_hwrm_ring_free(bp, cp_ring,
2511                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
2512         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2513         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2514                         sizeof(*cpr->cp_desc_ring));
2515         cpr->cp_raw_cons = 0;
2516         cpr->valid = 0;
2517 }
2518
2519 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
2520 {
2521         struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
2522         struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
2523         struct bnxt_ring *ring = rxr->rx_ring_struct;
2524         struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
2525
2526         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2527                 bnxt_hwrm_ring_free(bp, ring,
2528                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2529                 ring->fw_ring_id = INVALID_HW_RING_ID;
2530                 if (BNXT_HAS_RING_GRPS(bp))
2531                         bp->grp_info[queue_index].rx_fw_ring_id =
2532                                                         INVALID_HW_RING_ID;
2533         }
2534         ring = rxr->ag_ring_struct;
2535         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2536                 bnxt_hwrm_ring_free(bp, ring, BNXT_CHIP_THOR(bp) ?
2537                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
2538                                     HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2539                 ring->fw_ring_id = INVALID_HW_RING_ID;
2540                 if (BNXT_HAS_RING_GRPS(bp))
2541                         bp->grp_info[queue_index].ag_fw_ring_id =
2542                                                         INVALID_HW_RING_ID;
2543         }
2544         if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
2545                 bnxt_free_cp_ring(bp, cpr);
2546
2547         if (BNXT_HAS_RING_GRPS(bp))
2548                 bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
2549 }
2550
2551 static int
2552 bnxt_free_all_hwrm_rings(struct bnxt *bp)
2553 {
2554         unsigned int i;
2555
2556         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
2557                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
2558                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
2559                 struct bnxt_ring *ring = txr->tx_ring_struct;
2560                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
2561
2562                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2563                         bnxt_hwrm_ring_free(bp, ring,
2564                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
2565                         ring->fw_ring_id = INVALID_HW_RING_ID;
2566                         memset(txr->tx_desc_ring, 0,
2567                                         txr->tx_ring_struct->ring_size *
2568                                         sizeof(*txr->tx_desc_ring));
2569                         memset(txr->tx_buf_ring, 0,
2570                                         txr->tx_ring_struct->ring_size *
2571                                         sizeof(*txr->tx_buf_ring));
2572                         txr->tx_prod = 0;
2573                         txr->tx_cons = 0;
2574                 }
2575                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
2576                         bnxt_free_cp_ring(bp, cpr);
2577                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
2578                 }
2579         }
2580
2581         for (i = 0; i < bp->rx_cp_nr_rings; i++)
2582                 bnxt_free_hwrm_rx_ring(bp, i);
2583
2584         return 0;
2585 }
2586
2587 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
2588 {
2589         uint16_t i;
2590         int rc = 0;
2591
2592         if (!BNXT_HAS_RING_GRPS(bp))
2593                 return 0;
2594
2595         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
2596                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
2597                 if (rc)
2598                         return rc;
2599         }
2600         return rc;
2601 }
2602
2603 /*
2604  * HWRM utility functions
2605  */
2606
2607 void bnxt_free_hwrm_resources(struct bnxt *bp)
2608 {
2609         /* Release memzone */
2610         rte_free(bp->hwrm_cmd_resp_addr);
2611         rte_free(bp->hwrm_short_cmd_req_addr);
2612         bp->hwrm_cmd_resp_addr = NULL;
2613         bp->hwrm_short_cmd_req_addr = NULL;
2614         bp->hwrm_cmd_resp_dma_addr = 0;
2615         bp->hwrm_short_cmd_req_dma_addr = 0;
2616 }
2617
2618 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2619 {
2620         struct rte_pci_device *pdev = bp->pdev;
2621         char type[RTE_MEMZONE_NAMESIZE];
2622
2623         sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT, pdev->addr.domain,
2624                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2625         bp->max_resp_len = HWRM_MAX_RESP_LEN;
2626         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
2627         if (bp->hwrm_cmd_resp_addr == NULL)
2628                 return -ENOMEM;
2629         bp->hwrm_cmd_resp_dma_addr =
2630                 rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
2631         if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
2632                 PMD_DRV_LOG(ERR,
2633                         "unable to map response address to physical memory\n");
2634                 return -ENOMEM;
2635         }
2636         rte_spinlock_init(&bp->hwrm_lock);
2637
2638         return 0;
2639 }
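
/*
 * Example (illustrative only): the same allocate-then-map pattern used
 * above applies to any additional HWRM DMA buffer, e.g. the short
 * command buffer:
 *
 *	buf = rte_malloc("bnxt_hwrm_short", len, 0);
 *	if (buf == NULL)
 *		return -ENOMEM;
 *	dma = rte_malloc_virt2iova(buf);
 *	if (dma == RTE_BAD_IOVA) {
 *		rte_free(buf);
 *		return -ENOMEM;
 *	}
 */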
2640
2641 int
2642 bnxt_clear_one_vnic_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
2643 {
2644         int rc = 0;
2645
2646         if (filter->filter_type == HWRM_CFA_EM_FILTER) {
2647                 rc = bnxt_hwrm_clear_em_filter(bp, filter);
2648                 if (rc)
2649                         return rc;
2650         } else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
2651                 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2652                 if (rc)
2653                         return rc;
2654         }
2655
2656         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2657         return rc;
2658 }
2659
2660 static int
2661 bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2662 {
2663         struct bnxt_filter_info *filter;
2664         int rc = 0;
2665
2666         STAILQ_FOREACH(filter, &vnic->filter, next) {
2667                 rc = bnxt_clear_one_vnic_filter(bp, filter);
2668                 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
2669                 bnxt_free_filter(bp, filter);
2670         }
2671         return rc;
2672 }
2673
2674 static int
2675 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2676 {
2677         struct bnxt_filter_info *filter;
2678         struct rte_flow *flow;
2679         int rc = 0;
2680
2681         while (!STAILQ_EMPTY(&vnic->flow_list)) {
2682                 flow = STAILQ_FIRST(&vnic->flow_list);
2683                 filter = flow->filter;
2684                 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
2685                 rc = bnxt_clear_one_vnic_filter(bp, filter);
2686
2687                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2688                 rte_free(flow);
2689         }
2690         return rc;
2691 }
2692
2693 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2694 {
2695         struct bnxt_filter_info *filter;
2696         int rc = 0;
2697
2698         STAILQ_FOREACH(filter, &vnic->filter, next) {
2699                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2700                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2701                                                      filter);
2702                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2703                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2704                                                          filter);
2705                 else
2706                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2707                                                      filter);
2708                 if (rc)
2709                         break;
2710         }
2711         return rc;
2712 }
2713
2714 static void
2715 bnxt_free_tunnel_ports(struct bnxt *bp)
2716 {
2717         if (bp->vxlan_port_cnt)
2718                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2719                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2720         bp->vxlan_port = 0;
2721         if (bp->geneve_port_cnt)
2722                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2723                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2724         bp->geneve_port = 0;
2725 }
2726
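/*
 * Release all HWRM-managed resources: per-VNIC flows, filters, RSS
 * contexts and the VNICs themselves, followed by rings, ring groups,
 * stat contexts and tunnel ports.
 */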
2727 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2728 {
2729         int i;
2730
2731         if (bp->vnic_info == NULL)
2732                 return;
2733
2734         /*
2735          * Cleanup VNICs in reverse order, to make sure the L2 filter
2736          * from vnic0 is last to be cleaned up.
2737          */
2738         for (i = bp->max_vnics - 1; i >= 0; i--) {
2739                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2740
2741                 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
2742                         continue;
2743
2744                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2745
2746                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2747
2748                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2749
2750                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2751
2752                 bnxt_hwrm_vnic_free(bp, vnic);
2753
2754                 rte_free(vnic->fw_grp_ids);
2755         }
2756         /* Ring resources */
2757         bnxt_free_all_hwrm_rings(bp);
2758         bnxt_free_all_hwrm_ring_grps(bp);
2759         bnxt_free_all_hwrm_stat_ctxs(bp);
2760         bnxt_free_tunnel_ports(bp);
2761 }
2762
2763 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2764 {
2765         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2766
2767         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2768                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2769
2770         switch (conf_link_speed) {
2771         case ETH_LINK_SPEED_10M_HD:
2772         case ETH_LINK_SPEED_100M_HD:
2773                 /* FALLTHROUGH */
2774                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2775         }
2776         return hw_link_duplex;
2777 }
2778
2779 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2780 {
2781         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2782 }
2783
2784 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2785 {
2786         uint16_t eth_link_speed = 0;
2787
2788         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2789                 return ETH_LINK_SPEED_AUTONEG;
2790
2791         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2792         case ETH_LINK_SPEED_100M:
2793         case ETH_LINK_SPEED_100M_HD:
2794                 /* FALLTHROUGH */
2795                 eth_link_speed =
2796                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2797                 break;
2798         case ETH_LINK_SPEED_1G:
2799                 eth_link_speed =
2800                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2801                 break;
2802         case ETH_LINK_SPEED_2_5G:
2803                 eth_link_speed =
2804                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2805                 break;
2806         case ETH_LINK_SPEED_10G:
2807                 eth_link_speed =
2808                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2809                 break;
2810         case ETH_LINK_SPEED_20G:
2811                 eth_link_speed =
2812                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2813                 break;
2814         case ETH_LINK_SPEED_25G:
2815                 eth_link_speed =
2816                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2817                 break;
2818         case ETH_LINK_SPEED_40G:
2819                 eth_link_speed =
2820                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2821                 break;
2822         case ETH_LINK_SPEED_50G:
2823                 eth_link_speed =
2824                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2825                 break;
2826         case ETH_LINK_SPEED_100G:
2827                 eth_link_speed =
2828                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2829                 break;
2830         case ETH_LINK_SPEED_200G:
2831                 eth_link_speed =
2832                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_200GB;
2833                 break;
2834         default:
2835                 PMD_DRV_LOG(ERR,
2836                         "Unsupported link speed %u; default to AUTO\n",
2837                         conf_link_speed);
2838                 break;
2839         }
2840         return eth_link_speed;
2841 }
2842
2843 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2844                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2845                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2846                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \
2847                 ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G)
2848
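/*
 * Validate the link_speeds requested in dev_conf against the port
 * capabilities: a fixed-speed request must name exactly one supported
 * speed, and an autoneg mask must intersect the supported speeds.
 */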
2849 static int bnxt_validate_link_speed(struct bnxt *bp)
2850 {
2851         uint32_t link_speed = bp->eth_dev->data->dev_conf.link_speeds;
2852         uint16_t port_id = bp->eth_dev->data->port_id;
2853         uint32_t link_speed_capa;
2854         uint32_t one_speed;
2855
2856         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2857                 return 0;
2858
2859         link_speed_capa = bnxt_get_speed_capabilities(bp);
2860
2861         if (link_speed & ETH_LINK_SPEED_FIXED) {
2862                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2863
2864                 if (one_speed & (one_speed - 1)) {
2865                         PMD_DRV_LOG(ERR,
2866                                 "Invalid advertised speeds (%u) for port %u\n",
2867                                 link_speed, port_id);
2868                         return -EINVAL;
2869                 }
2870                 if ((one_speed & link_speed_capa) != one_speed) {
2871                         PMD_DRV_LOG(ERR,
2872                                 "Unsupported advertised speed (%u) for port %u\n",
2873                                 link_speed, port_id);
2874                         return -EINVAL;
2875                 }
2876         } else {
2877                 if (!(link_speed & link_speed_capa)) {
2878                         PMD_DRV_LOG(ERR,
2879                                 "Unsupported advertised speeds (%u) for port %u\n",
2880                                 link_speed, port_id);
2881                         return -EINVAL;
2882                 }
2883         }
2884         return 0;
2885 }
2886
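/*
 * Translate an ETH_LINK_SPEED_* bitmap into the HWRM
 * auto_link_speed_mask; for autoneg, prefer the speeds the firmware
 * reports as supported.
 */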
2887 static uint16_t
2888 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2889 {
2890         uint16_t ret = 0;
2891
2892         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2893                 if (bp->link_info->support_speeds)
2894                         return bp->link_info->support_speeds;
2895                 link_speed = BNXT_SUPPORTED_SPEEDS;
2896         }
2897
2898         if (link_speed & ETH_LINK_SPEED_100M)
2899                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2900         if (link_speed & ETH_LINK_SPEED_100M_HD)
2901                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2902         if (link_speed & ETH_LINK_SPEED_1G)
2903                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2904         if (link_speed & ETH_LINK_SPEED_2_5G)
2905                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2906         if (link_speed & ETH_LINK_SPEED_10G)
2907                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2908         if (link_speed & ETH_LINK_SPEED_20G)
2909                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2910         if (link_speed & ETH_LINK_SPEED_25G)
2911                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2912         if (link_speed & ETH_LINK_SPEED_40G)
2913                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2914         if (link_speed & ETH_LINK_SPEED_50G)
2915                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2916         if (link_speed & ETH_LINK_SPEED_100G)
2917                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2918         if (link_speed & ETH_LINK_SPEED_200G)
2919                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_200GB;
2920         return ret;
2921 }
2922
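/* Map an HWRM link speed code to the corresponding ETH_SPEED_NUM_* value. */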
2923 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2924 {
2925         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2926
2927         switch (hw_link_speed) {
2928         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2929                 eth_link_speed = ETH_SPEED_NUM_100M;
2930                 break;
2931         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2932                 eth_link_speed = ETH_SPEED_NUM_1G;
2933                 break;
2934         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2935                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2936                 break;
2937         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2938                 eth_link_speed = ETH_SPEED_NUM_10G;
2939                 break;
2940         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2941                 eth_link_speed = ETH_SPEED_NUM_20G;
2942                 break;
2943         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2944                 eth_link_speed = ETH_SPEED_NUM_25G;
2945                 break;
2946         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2947                 eth_link_speed = ETH_SPEED_NUM_40G;
2948                 break;
2949         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2950                 eth_link_speed = ETH_SPEED_NUM_50G;
2951                 break;
2952         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2953                 eth_link_speed = ETH_SPEED_NUM_100G;
2954                 break;
2955         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
2956                 eth_link_speed = ETH_SPEED_NUM_200G;
2957                 break;
2958         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2959         default:
2960                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2961                         hw_link_speed);
2962                 break;
2963         }
2964         return eth_link_speed;
2965 }
2966
2967 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2968 {
2969         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2970
2971         switch (hw_link_duplex) {
2972         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2973         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2974                 /* FALLTHROUGH */
2975                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2976                 break;
2977         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2978                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2979                 break;
2980         default:
2981                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2982                         hw_link_duplex);
2983                 break;
2984         }
2985         return eth_link_duplex;
2986 }
2987
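/*
 * Query the PHY configuration from the firmware and translate it into
 * the rte_eth_link representation (speed, duplex, status, autoneg).
 */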
2988 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2989 {
2990         int rc = 0;
2991         struct bnxt_link_info *link_info = bp->link_info;
2992
2993         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2994         if (rc) {
2995                 PMD_DRV_LOG(ERR,
2996                         "Get link config failed with rc %d\n", rc);
2997                 goto exit;
2998         }
2999         if (link_info->link_speed)
3000                 link->link_speed =
3001                         bnxt_parse_hw_link_speed(link_info->link_speed);
3002         else
3003                 link->link_speed = ETH_SPEED_NUM_NONE;
3004         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
3005         link->link_status = link_info->link_up;
3006         link->link_autoneg = link_info->auto_mode ==
3007                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
3008                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
3009 exit:
3010         return rc;
3011 }
3012
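/*
 * Program the PHY according to dev_conf->link_speeds. Only meaningful
 * on a single-function PF; autoneg is requested when the firmware
 * allows it, otherwise a forced speed is selected.
 */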
3013 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
3014 {
3015         int rc = 0;
3016         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
3017         struct bnxt_link_info link_req;
3018         uint16_t speed, autoneg;
3019
3020         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
3021                 return 0;
3022
3023         rc = bnxt_validate_link_speed(bp);
3024         if (rc)
3025                 goto error;
3026
3027         memset(&link_req, 0, sizeof(link_req));
3028         link_req.link_up = link_up;
3029         if (!link_up)
3030                 goto port_phy_cfg;
3031
3032         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
3033         if (BNXT_CHIP_THOR(bp) &&
3034             dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
3035                 /* 40G is not supported as part of media auto detect.
3036                  * The speed should be forced and autoneg disabled
3037                  * to configure 40G speed.
3038                  */
3039                 PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n");
3040                 autoneg = 0;
3041         }
3042
3043         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
3044         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
3045         /* Autoneg can be done only when the FW allows.
3046          * When user configures fixed speed of 40G and later changes to
3047          * any other speed, auto_link_speed/force_link_speed is still set
3048          * to 40G until link comes up at new speed.
3049          */
3050         if (autoneg == 1 &&
3051             !(!BNXT_CHIP_THOR(bp) &&
3052               (bp->link_info->auto_link_speed ||
3053                bp->link_info->force_link_speed))) {
3054                 link_req.phy_flags |=
3055                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
3056                 link_req.auto_link_speed_mask =
3057                         bnxt_parse_eth_link_speed_mask(bp,
3058                                                        dev_conf->link_speeds);
3059         } else {
3060                 if (bp->link_info->phy_type ==
3061                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
3062                     bp->link_info->phy_type ==
3063                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
3064                     bp->link_info->media_type ==
3065                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
3066                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
3067                         return -EINVAL;
3068                 }
3069
3070                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
3071                 /* If user wants a particular speed try that first. */
3072                 if (speed)
3073                         link_req.link_speed = speed;
3074                 else if (bp->link_info->force_link_speed)
3075                         link_req.link_speed = bp->link_info->force_link_speed;
3076                 else
3077                         link_req.link_speed = bp->link_info->auto_link_speed;
3078         }
3079         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
3080         link_req.auto_pause = bp->link_info->auto_pause;
3081         link_req.force_pause = bp->link_info->force_pause;
3082
3083 port_phy_cfg:
3084         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
3085         if (rc) {
3086                 PMD_DRV_LOG(ERR,
3087                         "Set link config failed with rc %d\n", rc);
3088         }
3089
3090 error:
3091         return rc;
3092 }
3093
3094 /* JIRA 22088 */
3095 int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
3096 {
3097         struct hwrm_func_qcfg_input req = {0};
3098         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3099         uint16_t flags;
3100         int rc = 0;
3101         uint16_t svif_info;
3102
             bp->func_svif = BNXT_SVIF_INVALID;
3103
3104         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3105         req.fid = rte_cpu_to_le_16(0xffff);
3106
3107         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3108
3109         HWRM_CHECK_RESULT();
3110
3111         /* Hard-coded 0xfff VLAN ID mask */
3112         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
3113
3114         svif_info = rte_le_to_cpu_16(resp->svif_info);
3115         if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
3116                 bp->func_svif = svif_info &
3117                                      HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
3118
3119         flags = rte_le_to_cpu_16(resp->flags);
3120         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
3121                 bp->flags |= BNXT_FLAG_MULTI_HOST;
3122
3123         if (BNXT_VF(bp) &&
3124             !BNXT_VF_IS_TRUSTED(bp) &&
3125             (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3126                 bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
3127                 PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
3128         } else if (BNXT_VF(bp) &&
3129                    BNXT_VF_IS_TRUSTED(bp) &&
3130                    !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3131                 bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
3132                 PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
3133         }
3134
3135         if (mtu)
3136                 *mtu = rte_le_to_cpu_16(resp->mtu);
3137
3138         switch (resp->port_partition_type) {
3139         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
3140         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
3141         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
3142                 /* FALLTHROUGH */
3143                 bp->flags |= BNXT_FLAG_NPAR_PF;
3144                 break;
3145         default:
3146                 bp->flags &= ~BNXT_FLAG_NPAR_PF;
3147                 break;
3148         }
3149
3150         HWRM_UNLOCK();
3151
3152         return rc;
3153 }
3154
3155 int bnxt_hwrm_parent_pf_qcfg(struct bnxt *bp)
3156 {
3157         struct hwrm_func_qcfg_input req = {0};
3158         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3159         int rc;
3160
3161         if (!BNXT_VF_IS_TRUSTED(bp))
3162                 return 0;
3163
3164         if (!bp->parent)
3165                 return -EINVAL;
3166
3167         bp->parent->fid = BNXT_PF_FID_INVALID;
3168
3169         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3170
3171         req.fid = rte_cpu_to_le_16(0xfffe); /* Request parent PF information. */
3172
3173         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3174
3175         HWRM_CHECK_RESULT();
3176
3177         memcpy(bp->parent->mac_addr, resp->mac_address, RTE_ETHER_ADDR_LEN);
3178         bp->parent->vnic = rte_le_to_cpu_16(resp->dflt_vnic_id);
3179         bp->parent->fid = rte_le_to_cpu_16(resp->fid);
3180         bp->parent->port_id = rte_le_to_cpu_16(resp->port_id);
3181
3182         /* FIXME: Temporary workaround - remove when firmware issue is fixed. */
3183         if (bp->parent->vnic == 0) {
3184                 PMD_DRV_LOG(ERR, "Error: parent VNIC unavailable.\n");
3185                 /* Use hard-coded values appropriate for current Wh+ fw. */
3186                 if (bp->parent->fid == 2)
3187                         bp->parent->vnic = 0x100;
3188                 else
3189                         bp->parent->vnic = 1;
3190         }
3191
3192         HWRM_UNLOCK();
3193
3194         return 0;
3195 }
3196
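/* Query the default VNIC id and source VIF (SVIF) of the given function. */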
3197 int bnxt_hwrm_get_dflt_vnic_svif(struct bnxt *bp, uint16_t fid,
3198                                  uint16_t *vnic_id, uint16_t *svif)
3199 {
3200         struct hwrm_func_qcfg_input req = {0};
3201         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3202         uint16_t svif_info;
3203         int rc = 0;
3204
3205         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3206         req.fid = rte_cpu_to_le_16(fid);
3207
3208         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3209
3210         HWRM_CHECK_RESULT();
3211
3212         if (vnic_id)
3213                 *vnic_id = rte_le_to_cpu_16(resp->dflt_vnic_id);
3214
3215         svif_info = rte_le_to_cpu_16(resp->svif_info);
3216         if (svif && (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID))
3217                 *svif = svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
3218
3219         HWRM_UNLOCK();
3220
3221         return rc;
3222 }
3223
3224 int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp)
3225 {
3226         struct hwrm_port_mac_qcfg_input req = {0};
3227         struct hwrm_port_mac_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3228         uint16_t port_svif_info;
3229         int rc;
3230
3231         bp->port_svif = BNXT_SVIF_INVALID;
3232
3233         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
3234                 return 0;
3235
3236         HWRM_PREP(&req, HWRM_PORT_MAC_QCFG, BNXT_USE_CHIMP_MB);
3237
3238         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3239
3240         HWRM_CHECK_RESULT_SILENT();
3241
3242         port_svif_info = rte_le_to_cpu_16(resp->port_svif_info);
3243         if (port_svif_info &
3244             HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_VALID)
3245                 bp->port_svif = port_svif_info &
3246                         HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_MASK;
3247
3248         HWRM_UNLOCK();
3249
3250         return 0;
3251 }
3252
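/*
 * Fallback used when FUNC_QCAPS fails for a VF: mirror the values that
 * were requested via FUNC_CFG into the qcaps layout so resource
 * accounting can proceed with a best-effort estimate.
 */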
3253 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
3254                                    struct hwrm_func_qcaps_output *qcaps)
3255 {
3256         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
3257         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
3258                sizeof(qcaps->mac_address));
3259         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
3260         qcaps->max_rx_rings = fcfg->num_rx_rings;
3261         qcaps->max_tx_rings = fcfg->num_tx_rings;
3262         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
3263         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
3264         qcaps->max_vfs = 0;
3265         qcaps->first_vf_id = 0;
3266         qcaps->max_vnics = fcfg->num_vnics;
3267         qcaps->max_decap_records = 0;
3268         qcaps->max_encap_records = 0;
3269         qcaps->max_tx_wm_flows = 0;
3270         qcaps->max_tx_em_flows = 0;
3271         qcaps->max_rx_wm_flows = 0;
3272         qcaps->max_rx_em_flows = 0;
3273         qcaps->max_flow_id = 0;
3274         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
3275         qcaps->max_sp_tx_rings = 0;
3276         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
3277 }
3278
3279 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
3280 {
3281         struct hwrm_func_cfg_input req = {0};
3282         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3283         uint32_t enables;
3284         int rc;
3285
3286         enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
3287                   HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3288                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3289                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3290                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3291                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3292                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3293                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3294                   HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;
3295
3296         if (BNXT_HAS_RING_GRPS(bp)) {
3297                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
3298                 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
3299         } else if (BNXT_HAS_NQ(bp)) {
3300                 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
3301                 req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
3302         }
3303
3304         req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
3305         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
3306         req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3307         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
3308         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
3309         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
3310         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
3311         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
3312         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
3313         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
3314         req.fid = rte_cpu_to_le_16(0xffff);
3315         req.enables = rte_cpu_to_le_32(enables);
3316
3317         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3318
3319         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3320
3321         HWRM_CHECK_RESULT();
3322         HWRM_UNLOCK();
3323
3324         return rc;
3325 }
3326
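/*
 * Split the PF resources evenly across the VFs and the PF itself,
 * hence the (num_vfs + 1) divisor.
 */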
3327 static void populate_vf_func_cfg_req(struct bnxt *bp,
3328                                      struct hwrm_func_cfg_input *req,
3329                                      int num_vfs)
3330 {
3331         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
3332                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3333                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3334                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3335                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3336                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3337                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3338                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3339                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
3340                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
3341
3342         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
3343                                     RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
3344                                     BNXT_NUM_VLANS);
3345         req->mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3346         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
3347                                                 (num_vfs + 1));
3348         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
3349         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
3350                                                (num_vfs + 1));
3351         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
3352         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
3353         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
3354         /* TODO: For now, do not support VMDq/RFS on VFs. */
3355         req->num_vnics = rte_cpu_to_le_16(1);
3356         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
3357                                                  (num_vfs + 1));
3358 }
3359
3360 static void add_random_mac_if_needed(struct bnxt *bp,
3361                                      struct hwrm_func_cfg_input *cfg_req,
3362                                      int vf)
3363 {
3364         struct rte_ether_addr mac;
3365
3366         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
3367                 return;
3368
3369         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
3370                 cfg_req->enables |=
3371                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3372                 rte_eth_random_addr(cfg_req->dflt_mac_addr);
3373                 bp->pf->vf_info[vf].random_mac = true;
3374         } else {
3375                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,
3376                         RTE_ETHER_ADDR_LEN);
3377         }
3378 }
3379
3380 static int reserve_resources_from_vf(struct bnxt *bp,
3381                                      struct hwrm_func_cfg_input *cfg_req,
3382                                      int vf)
3383 {
3384         struct hwrm_func_qcaps_input req = {0};
3385         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3386         int rc;
3387
3388         /* Get the actual allocated values now */
3389         HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
3390         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3391         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3392
3393         if (rc) {
3394                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
3395                 copy_func_cfg_to_qcaps(cfg_req, resp);
3396         } else if (resp->error_code) {
3397                 rc = rte_le_to_cpu_16(resp->error_code);
3398                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
3399                 copy_func_cfg_to_qcaps(cfg_req, resp);
3400         }
3401
3402         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
3403         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
3404         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
3405         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
3406         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
3407         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
3408         /*
3409          * TODO: While VMDq is not supported with VFs, max_vnics is
3410          * always forced to 1, so there is nothing to subtract here.
3411          */
3412         /* bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics); */
3413         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
3414
3415         HWRM_UNLOCK();
3416
3417         return 0;
3418 }
3419
3420 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
3421 {
3422         struct hwrm_func_qcfg_input req = {0};
3423         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3424         int rc;
3425
3426         /* Query the current default VLAN of this VF. */
3427         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3428         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3429         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3430         HWRM_CHECK_RESULT();
3431         rc = rte_le_to_cpu_16(resp->vlan);
3432
3433         HWRM_UNLOCK();
3434
3435         return rc;
3436 }
3437
3438 static int update_pf_resource_max(struct bnxt *bp)
3439 {
3440         struct hwrm_func_qcfg_input req = {0};
3441         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3442         int rc;
3443
3444         /* And copy the allocated numbers into the pf struct */
3445         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3446         req.fid = rte_cpu_to_le_16(0xffff);
3447         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3448         HWRM_CHECK_RESULT();
3449
3450         /* Only TX ring value reflects actual allocation? TODO */
3451         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
3452         bp->pf->evb_mode = resp->evb_mode;
3453
3454         HWRM_UNLOCK();
3455
3456         return rc;
3457 }
3458
3459 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
3460 {
3461         int rc;
3462
3463         if (!BNXT_PF(bp)) {
3464                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3465                 return -EINVAL;
3466         }
3467
3468         rc = bnxt_hwrm_func_qcaps(bp);
3469         if (rc)
3470                 return rc;
3471
3472         bp->pf->func_cfg_flags &=
3473                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3474                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3475         bp->pf->func_cfg_flags |=
3476                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
3477         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
3478         rc = __bnxt_hwrm_func_qcaps(bp);
3479         return rc;
3480 }
3481
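/*
 * VF allocation sequence: query PF capabilities, shrink the PF to a
 * single TX ring so enough rings remain for the VFs, register the VF
 * request forwarding buffer, issue FUNC_CFG per VF, then return the
 * leftover resources to the PF.
 */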
3482 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
3483 {
3484         struct hwrm_func_cfg_input req = {0};
3485         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3486         int i;
3487         size_t sz;
3488         int rc = 0;
3489         size_t req_buf_sz;
3490
3491         if (!BNXT_PF(bp)) {
3492                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3493                 return -EINVAL;
3494         }
3495
3496         rc = bnxt_hwrm_func_qcaps(bp);
3497
3498         if (rc)
3499                 return rc;
3500
3501         bp->pf->active_vfs = num_vfs;
3502
3503         /*
3504          * First, configure the PF to only use one TX ring.  This ensures that
3505          * there are enough rings for all VFs.
3506          *
3507          * If we don't do this, when we call func_alloc() later, we will lock
3508          * extra rings to the PF that won't be available during func_cfg() of
3509          * the VFs.
3510          *
3511          * This has been fixed with firmware versions above 20.6.54
3512          */
3513         bp->pf->func_cfg_flags &=
3514                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3515                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3516         bp->pf->func_cfg_flags |=
3517                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
3518         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
3519         if (rc)
3520                 return rc;
3521
3522         /*
3523          * Now, create and register a buffer to hold forwarded VF requests
3524          */
3525         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
3526         bp->pf->vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
3527                 page_roundup(req_buf_sz));
3528         if (bp->pf->vf_req_buf == NULL) {
3529                 rc = -ENOMEM;
3530                 goto error_free;
3531         }
3532         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
3533                 rte_mem_lock_page(((char *)bp->pf->vf_req_buf) + sz);
3534         for (i = 0; i < num_vfs; i++)
3535                 bp->pf->vf_info[i].req_buf = ((char *)bp->pf->vf_req_buf) +
3536                                         (i * HWRM_MAX_REQ_LEN);
3537
3538         rc = bnxt_hwrm_func_buf_rgtr(bp);
3539         if (rc)
3540                 goto error_free;
3541
3542         populate_vf_func_cfg_req(bp, &req, num_vfs);
3543
3544         bp->pf->active_vfs = 0;
3545         for (i = 0; i < num_vfs; i++) {
3546                 add_random_mac_if_needed(bp, &req, i);
3547
3548                 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3549                 req.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags);
3550                 req.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
3551                 rc = bnxt_hwrm_send_message(bp,
3552                                             &req,
3553                                             sizeof(req),
3554                                             BNXT_USE_CHIMP_MB);
3555
3556                 /* Clear enable flag for next pass */
3557                 req.enables &= ~rte_cpu_to_le_32(
3558                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3559
3560                 if (rc || resp->error_code) {
3561                         PMD_DRV_LOG(ERR,
3562                                 "Failed to initialize VF %d\n", i);
3563                         PMD_DRV_LOG(ERR,
3564                                 "Not all VFs available. (%d, %d)\n",
3565                                 rc, resp->error_code);
3566                         HWRM_UNLOCK();
3567                         break;
3568                 }
3569
3570                 HWRM_UNLOCK();
3571
3572                 reserve_resources_from_vf(bp, &req, i);
3573                 bp->pf->active_vfs++;
3574                 bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
3575         }
3576
3577         /*
3578          * Now configure the PF to use "the rest" of the resources.
3579          * STD_TX_RING_MODE is used even though it limits the number of
3580          * TX rings: it is required for QoS to function properly, and
3581          * without it the PF rings break the bandwidth settings.
3582          */
3583         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
3584         if (rc)
3585                 goto error_free;
3586
3587         rc = update_pf_resource_max(bp);
3588         if (rc)
3589                 goto error_free;
3590
3591         return rc;
3592
3593 error_free:
3594         bnxt_hwrm_func_buf_unrgtr(bp);
3595         return rc;
3596 }
3597
3598 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
3599 {
3600         struct hwrm_func_cfg_input req = {0};
3601         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3602         int rc;
3603
3604         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3605
3606         req.fid = rte_cpu_to_le_16(0xffff);
3607         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
3608         req.evb_mode = bp->pf->evb_mode;
3609
3610         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3611         HWRM_CHECK_RESULT();
3612         HWRM_UNLOCK();
3613
3614         return rc;
3615 }
3616
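/*
 * Program a UDP destination port for VXLAN or Geneve parsing and cache
 * the firmware-assigned port id for the later free.
 */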
3617 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
3618                                 uint8_t tunnel_type)
3619 {
3620         struct hwrm_tunnel_dst_port_alloc_input req = {0};
3621         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3622         int rc = 0;
3623
3624         HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
3625         req.tunnel_type = tunnel_type;
3626         req.tunnel_dst_port_val = rte_cpu_to_be_16(port);
3627         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3628         HWRM_CHECK_RESULT();
3629
3630         switch (tunnel_type) {
3631         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
3632                 bp->vxlan_fw_dst_port_id =
3633                         rte_le_to_cpu_16(resp->tunnel_dst_port_id);
3634                 bp->vxlan_port = port;
3635                 break;
3636         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
3637                 bp->geneve_fw_dst_port_id =
3638                         rte_le_to_cpu_16(resp->tunnel_dst_port_id);
3639                 bp->geneve_port = port;
3640                 break;
3641         default:
3642                 break;
3643         }
3644
3645         HWRM_UNLOCK();
3646
3647         return rc;
3648 }
3649
3650 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
3651                                 uint8_t tunnel_type)
3652 {
3653         struct hwrm_tunnel_dst_port_free_input req = {0};
3654         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
3655         int rc = 0;
3656
3657         HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
3658
3659         req.tunnel_type = tunnel_type;
3660         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
3661         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3662
3663         HWRM_CHECK_RESULT();
3664         HWRM_UNLOCK();
3665
3666         return rc;
3667 }
3668
3669 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
3670                                         uint32_t flags)
3671 {
3672         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3673         struct hwrm_func_cfg_input req = {0};
3674         int rc;
3675
3676         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3677
3678         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3679         req.flags = rte_cpu_to_le_32(flags);
3680         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3681
3682         HWRM_CHECK_RESULT();
3683         HWRM_UNLOCK();
3684
3685         return rc;
3686 }
3687
3688 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
3689 {
3690         uint32_t *flag = flagp;
3691
3692         vnic->flags = *flag;
3693 }
3694
3695 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3696 {
3697         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
3698 }
3699
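/*
 * Register the single-page buffer that receives HWRM requests
 * forwarded from the VFs.
 */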
3700 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
3701 {
3702         int rc = 0;
3703         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
3704         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
3705
3706         HWRM_PREP(&req, HWRM_FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
3707
3708         req.req_buf_num_pages = rte_cpu_to_le_16(1);
3709         req.req_buf_page_size = rte_cpu_to_le_16(
3710                          page_getenum(bp->pf->active_vfs * HWRM_MAX_REQ_LEN));
3711         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
3712         req.req_buf_page_addr0 =
3713                 rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf));
3714         if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
3715                 PMD_DRV_LOG(ERR,
3716                         "unable to map buffer address to physical memory\n");
3717                 return -ENOMEM;
3718         }
3719
3720         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3721
3722         HWRM_CHECK_RESULT();
3723         HWRM_UNLOCK();
3724
3725         return rc;
3726 }
3727
3728 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
3729 {
3730         int rc = 0;
3731         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
3732         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
3733
3734         if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
3735                 return 0;
3736
3737         HWRM_PREP(&req, HWRM_FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
3738
3739         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3740
3741         HWRM_CHECK_RESULT();
3742         HWRM_UNLOCK();
3743
3744         return rc;
3745 }
3746
3747 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
3748 {
3749         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3750         struct hwrm_func_cfg_input req = {0};
3751         int rc;
3752
3753         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3754
3755         req.fid = rte_cpu_to_le_16(0xffff);
3756         req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
3757         req.enables = rte_cpu_to_le_32(
3758                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3759         req.async_event_cr = rte_cpu_to_le_16(
3760                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3761         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3762
3763         HWRM_CHECK_RESULT();
3764         HWRM_UNLOCK();
3765
3766         return rc;
3767 }
3768
3769 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
3770 {
3771         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3772         struct hwrm_func_vf_cfg_input req = {0};
3773         int rc;
3774
3775         HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
3776
3777         req.enables = rte_cpu_to_le_32(
3778                         HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3779         req.async_event_cr = rte_cpu_to_le_16(
3780                         bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3781         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3782
3783         HWRM_CHECK_RESULT();
3784         HWRM_UNLOCK();
3785
3786         return rc;
3787 }
3788
3789 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
3790 {
3791         struct hwrm_func_cfg_input req = {0};
3792         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3793         uint16_t dflt_vlan, fid;
3794         uint32_t func_cfg_flags;
3795         int rc = 0;
3796
3797         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3798
3799         if (is_vf) {
3800                 dflt_vlan = bp->pf->vf_info[vf].dflt_vlan;
3801                 fid = bp->pf->vf_info[vf].fid;
3802                 func_cfg_flags = bp->pf->vf_info[vf].func_cfg_flags;
3803         } else {
3804                 fid = rte_cpu_to_le_16(0xffff);
3805                 func_cfg_flags = bp->pf->func_cfg_flags;
3806                 dflt_vlan = bp->vlan;
3807         }
3808
3809         req.flags = rte_cpu_to_le_32(func_cfg_flags);
3810         req.fid = rte_cpu_to_le_16(fid);
3811         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3812         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
3813
3814         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3815
3816         HWRM_CHECK_RESULT();
3817         HWRM_UNLOCK();
3818
3819         return rc;
3820 }
3821
3822 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
3823                         uint16_t max_bw, uint16_t enables)
3824 {
3825         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3826         struct hwrm_func_cfg_input req = {0};
3827         int rc;
3828
3829         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3830
3831         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3832         req.enables |= rte_cpu_to_le_32(enables);
3833         req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
3834         req.max_bw = rte_cpu_to_le_32(max_bw);
3835         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3836
3837         HWRM_CHECK_RESULT();
3838         HWRM_UNLOCK();
3839
3840         return rc;
3841 }
3842
3843 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
3844 {
3845         struct hwrm_func_cfg_input req = {0};
3846         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3847         int rc = 0;
3848
3849         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3850
3851         req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
3852         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3853         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3854         req.dflt_vlan = rte_cpu_to_le_16(bp->pf->vf_info[vf].dflt_vlan);
3855
3856         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3857
3858         HWRM_CHECK_RESULT();
3859         HWRM_UNLOCK();
3860
3861         return rc;
3862 }
3863
3864 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3865 {
3866         int rc;
3867
3868         if (BNXT_PF(bp))
3869                 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3870         else
3871                 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3872
3873         return rc;
3874 }
3875
3876 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3877                               void *encaped, size_t ec_size)
3878 {
3879         int rc = 0;
3880         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3881         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3882
3883         if (ec_size > sizeof(req.encap_request))
3884                 return -EINVAL;
3885
3886         HWRM_PREP(&req, HWRM_REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
3887
3888         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3889         memcpy(req.encap_request, encaped, ec_size);
3890
3891         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3892
3893         HWRM_CHECK_RESULT();
3894         HWRM_UNLOCK();
3895
3896         return rc;
3897 }
3898
3899 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3900                                        struct rte_ether_addr *mac)
3901 {
3902         struct hwrm_func_qcfg_input req = {0};
3903         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3904         int rc;
3905
3906         HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3907
3908         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3909         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3910
3911         HWRM_CHECK_RESULT();
3912
3913         memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);
3914
3915         HWRM_UNLOCK();
3916
3917         return rc;
3918 }
3919
3920 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3921                             void *encaped, size_t ec_size)
3922 {
3923         int rc = 0;
3924         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3925         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3926
3927         if (ec_size > sizeof(req.encap_request))
3928                 return -EINVAL;
3929
3930         HWRM_PREP(&req, HWRM_EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
3931
3932         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3933         memcpy(req.encap_request, encaped, ec_size);
3934
3935         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3936
3937         HWRM_CHECK_RESULT();
3938         HWRM_UNLOCK();
3939
3940         return rc;
3941 }
3942
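/*
 * Read one statistics context and accumulate it into the per-queue
 * counters; 'rx' selects the ingress counters, otherwise egress.
 */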
3943 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3944                          struct rte_eth_stats *stats, uint8_t rx)
3945 {
3946         int rc = 0;
3947         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3948         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3949
3950         HWRM_PREP(&req, HWRM_STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
3951
3952         req.stat_ctx_id = rte_cpu_to_le_32(cid);
3953
3954         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3955
3956         HWRM_CHECK_RESULT();
3957
3958         if (rx) {
3959                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3960                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3961                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3962                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3963                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3964                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3965                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3966                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3967         } else {
3968                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3969                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3970                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3971                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3972                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3973                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3974         }
3975
3976         HWRM_UNLOCK();
3977
3978         return rc;
3979 }
3980
3981 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3982 {
3983         struct hwrm_port_qstats_input req = {0};
3984         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3985         struct bnxt_pf_info *pf = bp->pf;
3986         int rc;
3987
3988         HWRM_PREP(&req, HWRM_PORT_QSTATS, BNXT_USE_CHIMP_MB);
3989
3990         req.port_id = rte_cpu_to_le_16(pf->port_id);
3991         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3992         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3993         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3994
3995         HWRM_CHECK_RESULT();
3996         HWRM_UNLOCK();
3997
3998         return rc;
3999 }
4000
4001 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
4002 {
4003         struct hwrm_port_clr_stats_input req = {0};
4004         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
4005         struct bnxt_pf_info *pf = bp->pf;
4006         int rc;
4007
4008         /* Not allowed on NS2 device, NPAR, MultiHost, VF */
4009         if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
4010             BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
4011                 return 0;
4012
4013         HWRM_PREP(&req, HWRM_PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
4014
4015         req.port_id = rte_cpu_to_le_16(pf->port_id);
4016         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4017
4018         HWRM_CHECK_RESULT();
4019         HWRM_UNLOCK();
4020
4021         return rc;
4022 }
4023
4024 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
4025 {
4026         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4027         struct hwrm_port_led_qcaps_input req = {0};
4028         int rc;
4029
4030         if (BNXT_VF(bp))
4031                 return 0;
4032
4033         HWRM_PREP(&req, HWRM_PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
4034         req.port_id = bp->pf->port_id;
4035         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4036
4037         HWRM_CHECK_RESULT();
4038
4039         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
4040                 unsigned int i;
4041
4042                 bp->leds->num_leds = resp->num_leds;
4043                 memcpy(bp->leds, &resp->led0_id,
4044                         sizeof(bp->leds[0]) * bp->leds->num_leds);
4045                 for (i = 0; i < bp->leds->num_leds; i++) {
4046                         struct bnxt_led_info *led = &bp->leds[i];
4047
4048                         uint16_t caps = led->led_state_caps;
4049
4050                         if (!led->led_group_id ||
4051                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
4052                                 bp->leds->num_leds = 0;
4053                                 break;
4054                         }
4055                 }
4056         }
4057
4058         HWRM_UNLOCK();
4059
4060         return rc;
4061 }
4062
4063 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
4064 {
4065         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4066         struct hwrm_port_led_cfg_input req = {0};
4067         struct bnxt_led_cfg *led_cfg;
4068         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
4069         uint16_t duration = 0;
4070         int rc, i;
4071
4072         if (!bp->leds->num_leds || BNXT_VF(bp))
4073                 return -EOPNOTSUPP;
4074
4075         HWRM_PREP(&req, HWRM_PORT_LED_CFG, BNXT_USE_CHIMP_MB);
4076
4077         if (led_on) {
4078                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
4079                 duration = rte_cpu_to_le_16(500);
4080         }
4081         req.port_id = bp->pf->port_id;
4082         req.num_leds = bp->leds->num_leds;
4083         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
4084         for (i = 0; i < bp->leds->num_leds; i++, led_cfg++) {
4085                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
4086                 led_cfg->led_id = bp->leds[i].led_id;
4087                 led_cfg->led_state = led_state;
4088                 led_cfg->led_blink_on = duration;
4089                 led_cfg->led_blink_off = duration;
4090                 led_cfg->led_group_id = bp->leds[i].led_group_id;
4091         }
4092
4093         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4094
4095         HWRM_CHECK_RESULT();
4096         HWRM_UNLOCK();
4097
4098         return rc;
4099 }
4100
4101 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
4102                                uint32_t *length)
4103 {
4104         int rc;
4105         struct hwrm_nvm_get_dir_info_input req = {0};
4106         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
4107
4108         HWRM_PREP(&req, HWRM_NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
4109
4110         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4111
4112         HWRM_CHECK_RESULT();
4113
4114         *entries = rte_le_to_cpu_32(resp->entries);
4115         *length = rte_le_to_cpu_32(resp->entry_length);
4116
4117         HWRM_UNLOCK();
4118         return rc;
4119 }
4120
4121 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
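/*
 * Read the NVM directory: the first two bytes of 'data' receive the
 * entry count and entry length, the remainder is filled from a DMA
 * bounce buffer holding the directory entries.
 */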
4122 {
4123         int rc;
4124         uint32_t dir_entries;
4125         uint32_t entry_length;
4126         uint8_t *buf;
4127         size_t buflen;
4128         rte_iova_t dma_handle;
4129         struct hwrm_nvm_get_dir_entries_input req = {0};
4130         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
4131
4132         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
4133         if (rc != 0)
4134                 return rc;
4135
4136         *data++ = dir_entries;
4137         *data++ = entry_length;
4138         len -= 2;
4139         memset(data, 0xff, len);
4140
4141         buflen = dir_entries * entry_length;
4142         buf = rte_malloc("nvm_dir", buflen, 0);
4143         if (buf == NULL)
4144                 return -ENOMEM;
4145         dma_handle = rte_malloc_virt2iova(buf);
4146         if (dma_handle == RTE_BAD_IOVA) {
4147                 PMD_DRV_LOG(ERR,
4148                         "unable to map response address to physical memory\n");
                rte_free(buf);  /* do not leak the bounce buffer on failure */
4149                 return -ENOMEM;
4150         }
4151         HWRM_PREP(&req, HWRM_NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
4152         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4153         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4154
4155         if (rc == 0)
4156                 memcpy(data, buf, len > buflen ? buflen : len);
4157
4158         rte_free(buf);
4159         HWRM_CHECK_RESULT();
4160         HWRM_UNLOCK();
4161
4162         return rc;
4163 }
4164
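/*
 * Editor's illustration (not upstream code): reading the NVM directory is
 * a two-step flow -- query the entry count and entry size, then size the
 * caller's buffer from those values.  A hypothetical caller:
 *
 *	uint32_t entries, entry_len, len;
 *	uint8_t *dir;
 *	int rc;
 *
 *	rc = bnxt_hwrm_nvm_get_dir_info(bp, &entries, &entry_len);
 *	if (rc == 0) {
 *		len = entries * entry_len + 2;
 *		dir = rte_malloc("nvm_dir_buf", len, 0);
 *		if (dir != NULL)
 *			rc = bnxt_get_nvram_directory(bp, len, dir);
 *		rte_free(dir);
 *	}
 *
 * The extra two bytes cover the count/length header that
 * bnxt_get_nvram_directory() writes ahead of the table itself.
 */
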
4165 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
4166                              uint32_t offset, uint32_t length,
4167                              uint8_t *data)
4168 {
4169         int rc;
4170         uint8_t *buf;
4171         rte_iova_t dma_handle;
4172         struct hwrm_nvm_read_input req = {0};
4173         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
4174
4175         buf = rte_malloc("nvm_item", length, 0);
4176         if (!buf)
4177                 return -ENOMEM;
4178
4179         dma_handle = rte_malloc_virt2iova(buf);
4180         if (dma_handle == RTE_BAD_IOVA) {
4181                 PMD_DRV_LOG(ERR,
4182                         "unable to map response address to physical memory\n");
                rte_free(buf);  /* do not leak the bounce buffer on failure */
4183                 return -ENOMEM;
4184         }
4185         HWRM_PREP(&req, HWRM_NVM_READ, BNXT_USE_CHIMP_MB);
4186         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4187         req.dir_idx = rte_cpu_to_le_16(index);
4188         req.offset = rte_cpu_to_le_32(offset);
4189         req.len = rte_cpu_to_le_32(length);
4190         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4191         if (rc == 0)
4192                 memcpy(data, buf, length);
4193
4194         rte_free(buf);
4195         HWRM_CHECK_RESULT();
4196         HWRM_UNLOCK();
4197
4198         return rc;
4199 }
4200
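/*
 * Editor's illustration (not upstream code): a one-shot read of an entire
 * directory entry, where "index" and "entry_len" are assumed to come from
 * a prior bnxt_get_nvram_directory() dump:
 *
 *	uint8_t *item = rte_malloc("nvm_item_buf", entry_len, 0);
 *	int rc = -ENOMEM;
 *
 *	if (item != NULL)
 *		rc = bnxt_hwrm_get_nvram_item(bp, index, 0, entry_len, item);
 *	rte_free(item);
 *
 * Partial reads are also possible, since the request carries an explicit
 * offset/length pair.
 */
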
4201 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
4202 {
4203         int rc;
4204         struct hwrm_nvm_erase_dir_entry_input req = {0};
4205         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
4206
4207         HWRM_PREP(&req, HWRM_NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
4208         req.dir_idx = rte_cpu_to_le_16(index);
4209         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4210         HWRM_CHECK_RESULT();
4211         HWRM_UNLOCK();
4212
4213         return rc;
4214 }
4215
4217 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
4218                           uint16_t dir_ordinal, uint16_t dir_ext,
4219                           uint16_t dir_attr, const uint8_t *data,
4220                           size_t data_len)
4221 {
4222         int rc;
4223         struct hwrm_nvm_write_input req = {0};
4224         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
4225         rte_iova_t dma_handle;
4226         uint8_t *buf;
4227
4228         buf = rte_malloc("nvm_write", data_len, 0);
4229         if (!buf)
4230                 return -ENOMEM;
4231
4232         dma_handle = rte_malloc_virt2iova(buf);
4233         if (dma_handle == RTE_BAD_IOVA) {
4234                 PMD_DRV_LOG(ERR,
4235                         "unable to map response address to physical memory\n");
                rte_free(buf);  /* do not leak the bounce buffer on failure */
4236                 return -ENOMEM;
4237         }
4238         memcpy(buf, data, data_len);
4239
4240         HWRM_PREP(&req, HWRM_NVM_WRITE, BNXT_USE_CHIMP_MB);
4241
4242         req.dir_type = rte_cpu_to_le_16(dir_type);
4243         req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
4244         req.dir_ext = rte_cpu_to_le_16(dir_ext);
4245         req.dir_attr = rte_cpu_to_le_16(dir_attr);
4246         req.dir_data_length = rte_cpu_to_le_32(data_len);
4247         req.host_src_addr = rte_cpu_to_le_64(dma_handle);
4248
4249         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4250
4251         rte_free(buf);
4252         HWRM_CHECK_RESULT();
4253         HWRM_UNLOCK();
4254
4255         return rc;
4256 }
4257
4258 static void
4259 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
4260 {
4261         uint32_t *count = cbdata;
4262
4263         *count = *count + 1;
4264 }
4265
4266 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
4267                                      struct bnxt_vnic_info *vnic __rte_unused)
4268 {
4269         return 0;
4270 }
4271
4272 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
4273 {
4274         uint32_t count = 0;
4275
4276         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
4277             &count, bnxt_vnic_count_hwrm_stub);
4278
4279         return count;
4280 }
4281
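/*
 * Editor's note: bnxt_vf_vnic_count() above is the read-only use of the
 * query-and-config iterator defined below -- a vnic_cb that only inspects
 * state, paired with a stub hwrm_cb so nothing is reprogrammed.  A
 * mutating pair would look like this sketch (set_bd_stall is a
 * hypothetical callback; bnxt_hwrm_vnic_cfg() is the real reprogramming
 * hook):
 *
 *	static void set_bd_stall(struct bnxt_vnic_info *vnic, void *on)
 *	{
 *		vnic->bd_stall = *(bool *)on;
 *	}
 *
 *	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, set_bd_stall, &on,
 *						bnxt_hwrm_vnic_cfg);
 */
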
4282 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
4283                                         uint16_t *vnic_ids)
4284 {
4285         struct hwrm_func_vf_vnic_ids_query_input req = {0};
4286         struct hwrm_func_vf_vnic_ids_query_output *resp =
4287                                                 bp->hwrm_cmd_resp_addr;
4288         int rc;
4289
4290         /* First query all VNIC ids */
4291         HWRM_PREP(&req, HWRM_FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
4292
4293         req.vf_id = rte_cpu_to_le_16(bp->pf->first_vf_id + vf);
4294         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf->total_vnics);
4295         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));
4296
4297         if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
4298                 HWRM_UNLOCK();
4299                 PMD_DRV_LOG(ERR,
4300                 "unable to map VNIC ID table address to physical memory\n");
4301                 return -ENOMEM;
4302         }
4303         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4304         HWRM_CHECK_RESULT();
4305         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
4306
4307         HWRM_UNLOCK();
4308
4309         return rc;
4310 }
4311
4312 /*
4313  * This function queries the VNIC IDs for the specified VF. For each VNIC
4314  * it calls vnic_cb to update the necessary vnic_info fields using cbdata,
4315  * then calls hwrm_cb to program the new VNIC configuration into the FW.
4316  */
4317 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
4318         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
4319         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
4320 {
4321         struct bnxt_vnic_info vnic;
4322         int rc = 0;
4323         int i, num_vnic_ids;
4324         uint16_t *vnic_ids;
4325         size_t vnic_id_sz;
4326         size_t sz;
4327
4328         /* First query all VNIC ids */
4329         vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
4330         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4331                         RTE_CACHE_LINE_SIZE);
4332         if (vnic_ids == NULL)
4333                 return -ENOMEM;
4334
4335         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4336                 rte_mem_lock_page(((char *)vnic_ids) + sz);
4337
4338         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4339
4340         if (num_vnic_ids < 0) {
                rte_free(vnic_ids);     /* do not leak the ID table on error */
4341                 return num_vnic_ids;
        }
4342
4343         /* Retrieve each VNIC, let vnic_cb update it (e.g. bd_stall), then reprogram via hwrm_cb */
4344
4345         for (i = 0; i < num_vnic_ids; i++) {
4346                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4347                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4348                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf->first_vf_id + vf);
4349                 if (rc)
4350                         break;
4351                 if (vnic.mru <= 4)      /* Indicates unallocated */
4352                         continue;
4353
4354                 vnic_cb(&vnic, cbdata);
4355
4356                 rc = hwrm_cb(bp, &vnic);
4357                 if (rc)
4358                         break;
4359         }
4360
4361         rte_free(vnic_ids);
4362
4363         return rc;
4364 }
4365
4366 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
4367                                               bool on)
4368 {
4369         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4370         struct hwrm_func_cfg_input req = {0};
4371         int rc;
4372
4373         HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4374
4375         req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4376         req.enables |= rte_cpu_to_le_32(
4377                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
4378         req.vlan_antispoof_mode = on ?
4379                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
4380                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
4381         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4382
4383         HWRM_CHECK_RESULT();
4384         HWRM_UNLOCK();
4385
4386         return rc;
4387 }
4388
4389 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
4390 {
4391         struct bnxt_vnic_info vnic;
4392         uint16_t *vnic_ids;
4393         size_t vnic_id_sz;
4394         int num_vnic_ids, i;
4395         size_t sz;
4396         int rc;
4397
4398         vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
4399         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4400                         RTE_CACHE_LINE_SIZE);
4401         if (vnic_ids == NULL)
4402                 return -ENOMEM;
4403
4404         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4405                 rte_mem_lock_page(((char *)vnic_ids) + sz);
4406
4407         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4408         if (rc <= 0)
4409                 goto exit;
4410         num_vnic_ids = rc;
4411
4412         /*
4413          * Loop through to find the default VNIC ID.
4414          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
4415          * by sending the hwrm_func_qcfg command to the firmware.
4416          */
4417         for (i = 0; i < num_vnic_ids; i++) {
4418                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4419                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4420                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
4421                                         bp->pf->first_vf_id + vf);
4422                 if (rc)
4423                         goto exit;
4424                 if (vnic.func_default) {
4425                         rte_free(vnic_ids);
4426                         return vnic.fw_vnic_id;
4427                 }
4428         }
4429         /* Could not find a default VNIC. */
4430         PMD_DRV_LOG(ERR, "No default VNIC\n");
4431 exit:
4432         rte_free(vnic_ids);
4433         return rc;
4434 }
4435
4436 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
4437                          uint16_t dst_id,
4438                          struct bnxt_filter_info *filter)
4439 {
4440         int rc = 0;
4441         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
4442         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4443         uint32_t enables = 0;
4444
4445         if (filter->fw_em_filter_id != UINT64_MAX)
4446                 bnxt_hwrm_clear_em_filter(bp, filter);
4447
4448         HWRM_PREP(&req, HWRM_CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
4449
4450         req.flags = rte_cpu_to_le_32(filter->flags);
4451
4452         enables = filter->enables |
4453               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
4454         req.dst_id = rte_cpu_to_le_16(dst_id);
4455
4456         if (filter->ip_addr_type) {
4457                 req.ip_addr_type = filter->ip_addr_type;
4458                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4459         }
4460         if (enables &
4461             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4462                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4463         if (enables &
4464             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4465                 memcpy(req.src_macaddr, filter->src_macaddr,
4466                        RTE_ETHER_ADDR_LEN);
4467         if (enables &
4468             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
4469                 memcpy(req.dst_macaddr, filter->dst_macaddr,
4470                        RTE_ETHER_ADDR_LEN);
4471         if (enables &
4472             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
4473                 req.ovlan_vid = filter->l2_ovlan;
4474         if (enables &
4475             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
4476                 req.ivlan_vid = filter->l2_ivlan;
4477         if (enables &
4478             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
4479                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4480         if (enables &
4481             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4482                 req.ip_protocol = filter->ip_protocol;
4483         if (enables &
4484             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4485                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
4486         if (enables &
4487             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
4488                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
4489         if (enables &
4490             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
4491                 req.src_port = rte_cpu_to_be_16(filter->src_port);
4492         if (enables &
4493             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
4494                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
4495         if (enables &
4496             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4497                 req.mirror_vnic_id = filter->mirror_vnic_id;
4498
4499         req.enables = rte_cpu_to_le_32(enables);
4500
4501         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4502
4503         HWRM_CHECK_RESULT();
4504
4505         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
4506         HWRM_UNLOCK();
4507
4508         return rc;
4509 }
4510
4511 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
4512 {
4513         int rc = 0;
4514         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
4515         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
4516
4517         if (filter->fw_em_filter_id == UINT64_MAX)
4518                 return 0;
4519
4520         HWRM_PREP(&req, HWRM_CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
4521
4522         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
4523
4524         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4525
4526         HWRM_CHECK_RESULT();
4527         HWRM_UNLOCK();
4528
4529         filter->fw_em_filter_id = UINT64_MAX;
4530         filter->fw_l2_filter_id = UINT64_MAX;
4531
4532         return 0;
4533 }
4534
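/*
 * Editor's sketch (illustration only): the EM filter pair above is
 * idempotent at this layer.  bnxt_hwrm_set_em_filter() frees any previous
 * instance before allocating, and UINT64_MAX doubles as the "no filter"
 * marker, so teardown is always safe:
 *
 *	filter->fw_em_filter_id = UINT64_MAX;
 *	rc = bnxt_hwrm_set_em_filter(bp, vnic->fw_vnic_id, filter);
 *	...
 *	bnxt_hwrm_clear_em_filter(bp, filter);	(no-op if never allocated)
 */
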
4535 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
4536                          uint16_t dst_id,
4537                          struct bnxt_filter_info *filter)
4538 {
4539         int rc = 0;
4540         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
4541         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
4542                                                 bp->hwrm_cmd_resp_addr;
4543         uint32_t enables = 0;
4544
4545         if (filter->fw_ntuple_filter_id != UINT64_MAX)
4546                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
4547
4548         HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
4549
4550         req.flags = rte_cpu_to_le_32(filter->flags);
4551
4552         enables = filter->enables |
4553               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
4554         req.dst_id = rte_cpu_to_le_16(dst_id);
4555
4556         if (filter->ip_addr_type) {
4557                 req.ip_addr_type = filter->ip_addr_type;
4558                 enables |=
4559                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4560         }
4561         if (enables &
4562             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4563                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4564         if (enables &
4565             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4566                 memcpy(req.src_macaddr, filter->src_macaddr,
4567                        RTE_ETHER_ADDR_LEN);
4568         if (enables &
4569             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
4570                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4571         if (enables &
4572             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4573                 req.ip_protocol = filter->ip_protocol;
4574         if (enables &
4575             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4576                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
4577         if (enables &
4578             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
4579                 req.src_ipaddr_mask[0] =
4580                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
4581         if (enables &
4582             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
4583                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
4584         if (enables &
4585             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
4586                 req.dst_ipaddr_mask[0] =
4587                         rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
4588         if (enables &
4589             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
4590                 req.src_port = rte_cpu_to_le_16(filter->src_port);
4591         if (enables &
4592             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
4593                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
4594         if (enables &
4595             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
4596                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
4597         if (enables &
4598             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
4599                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
4600         if (enables &
4601             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4602                 req.mirror_vnic_id = filter->mirror_vnic_id;
4603
4604         req.enables = rte_cpu_to_le_32(enables);
4605
4606         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4607
4608         HWRM_CHECK_RESULT();
4609
4610         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
4611         filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
4612         HWRM_UNLOCK();
4613
4614         return rc;
4615 }
4616
4617 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
4618                                 struct bnxt_filter_info *filter)
4619 {
4620         int rc = 0;
4621         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
4622         struct hwrm_cfa_ntuple_filter_free_output *resp =
4623                                                 bp->hwrm_cmd_resp_addr;
4624
4625         if (filter->fw_ntuple_filter_id == UINT64_MAX)
4626                 return 0;
4627
4628         HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
4629
4630         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
4631
4632         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4633
4634         HWRM_CHECK_RESULT();
4635         HWRM_UNLOCK();
4636
4637         filter->fw_ntuple_filter_id = UINT64_MAX;
4638
4639         return 0;
4640 }
4641
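/*
 * Editor's note: both filter allocators share the same "enables" idiom --
 * an optional field is copied into the request only when its ENABLES bit
 * is set, and the accumulated mask is written last so the FW ignores the
 * untouched fields.  Minimal sketch for a TCP destination-port match
 * (field values are hypothetical):
 *
 *	filter->enables =
 *		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL |
 *		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT;
 *	filter->ip_protocol = IPPROTO_TCP;
 *	filter->dst_port = 443;
 *	rc = bnxt_hwrm_set_ntuple_filter(bp, vnic->fw_vnic_id, filter);
 */
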
4642 static int
4643 bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4644 {
4645         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4646         uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
4647         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
4648         struct bnxt_rx_queue **rxqs = bp->rx_queues;
4649         uint16_t *ring_tbl = vnic->rss_table;
4650         int nr_ctxs = vnic->num_lb_ctxts;
4651         int max_rings = bp->rx_nr_rings;
4652         int i, j, k, cnt;
4653         int rc = 0;
4654
4655         for (i = 0, k = 0; i < nr_ctxs; i++) {
4656                 struct bnxt_rx_ring_info *rxr;
4657                 struct bnxt_cp_ring_info *cpr;
4658
4659                 HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
4660
4661                 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
4662                 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
4663                 req.hash_mode_flags = vnic->hash_mode;
4664
4665                 req.ring_grp_tbl_addr =
4666                     rte_cpu_to_le_64(vnic->rss_table_dma_addr +
4667                                      i * BNXT_RSS_ENTRIES_PER_CTX_THOR *
4668                                      2 * sizeof(*ring_tbl));
4669                 req.hash_key_tbl_addr =
4670                     rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
4671
4672                 req.ring_table_pair_index = i;
4673                 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
4674
4675                 for (j = 0; j < 64; j++) {
4676                         uint16_t ring_id;
4677
4678                         /* Find next active ring. */
4679                         for (cnt = 0; cnt < max_rings; cnt++) {
4680                                 if (rx_queue_state[k] !=
4681                                                 RTE_ETH_QUEUE_STATE_STOPPED)
4682                                         break;
4683                                 if (++k == max_rings)
4684                                         k = 0;
4685                         }
4686
4687                         /* Return if no rings are active. */
4688                         if (cnt == max_rings) {
4689                                 HWRM_UNLOCK();
4690                                 return 0;
4691                         }
4692
4693                         /* Add rx/cp ring pair to RSS table. */
4694                         rxr = rxqs[k]->rx_ring;
4695                         cpr = rxqs[k]->cp_ring;
4696
4697                         ring_id = rxr->rx_ring_struct->fw_ring_id;
4698                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4699                         ring_id = cpr->cp_ring_struct->fw_ring_id;
4700                         *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4701
4702                         if (++k == max_rings)
4703                                 k = 0;
4704                 }
4705                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
4706                                             BNXT_USE_CHIMP_MB);
4707
4708                 HWRM_CHECK_RESULT();
4709                 HWRM_UNLOCK();
4710         }
4711
4712         return rc;
4713 }
4714
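/*
 * Editor's note: on Thor the RSS table is programmed in per-context
 * chunks of BNXT_RSS_ENTRIES_PER_CTX_THOR (rx ring, completion ring) ID
 * pairs, which is why the DMA address above advances by
 * BNXT_RSS_ENTRIES_PER_CTX_THOR * 2 entries per iteration and the inner
 * loop emits exactly 64 pairs.  Context count, as a sketch:
 *
 *	nr_ctxs = (rx_nr_rings + BNXT_RSS_ENTRIES_PER_CTX_THOR - 1) /
 *		  BNXT_RSS_ENTRIES_PER_CTX_THOR;
 *
 * so even a single active ring consumes one full 64-pair context.
 */
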
4715 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4716 {
4717         unsigned int rss_idx, fw_idx, i;
4718
4719         if (!(vnic->rss_table && vnic->hash_type))
4720                 return 0;
4721
4722         if (BNXT_CHIP_THOR(bp))
4723                 return bnxt_vnic_rss_configure_thor(bp, vnic);
4724
4725         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4726                 return 0;
4727
4728         if (vnic->rss_table && vnic->hash_type) {
4729                 /*
4730                  * Fill the RSS hash & redirection table with
4731                  * ring group ids for all VNICs
4732                  */
4733                 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
4734                         rss_idx++, fw_idx++) {
4735                         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
4736                                 fw_idx %= bp->rx_cp_nr_rings;
4737                                 if (vnic->fw_grp_ids[fw_idx] !=
4738                                     INVALID_HW_RING_ID)
4739                                         break;
4740                                 fw_idx++;
4741                         }
4742                         if (i == bp->rx_cp_nr_rings)
4743                                 return 0;
4744                         vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
4745                 }
4746                 return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
4747         }
4748
4749         return 0;
4750 }
4751
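/*
 * Editor's note: on pre-Thor chips the HW_HASH_INDEX_SIZE-entry
 * redirection table holds ring *group* IDs instead of ring ID pairs.
 * fw_idx walks the groups round-robin and skips INVALID_HW_RING_ID
 * slots, so stopped queues drop out of the spread; with four active
 * groups the table simply repeats their IDs in order before
 * bnxt_hwrm_vnic_rss_cfg() pushes it to the FW.
 */
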
4752 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
4753         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
4754 {
4755         uint16_t flags;
4756
4757         req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
4758
4759         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
4760         req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
4761
4762         /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
4763         req->num_cmpl_dma_aggr_during_int =
4764                 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
4765
4766         req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
4767
4768         /* min timer set to 1/2 of interrupt timer */
4769         req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
4770
4771         /* buf timer set to 1/4 of interrupt timer */
4772         req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
4773
4774         req->cmpl_aggr_dma_tmr_during_int =
4775                 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
4776
4777         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
4778                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
4779         req->flags = rte_cpu_to_le_16(flags);
4780 }
4781
4782 static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp,
4783                 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
4784 {
4785         struct hwrm_ring_aggint_qcaps_input req = {0};
4786         struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4787         uint32_t enables;
4788         uint16_t flags;
4789         int rc;
4790
4791         HWRM_PREP(&req, HWRM_RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
4792         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4793         HWRM_CHECK_RESULT();
4794
4795         agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max;
4796         agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min;
4797
4798         flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
4799                 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
4800         agg_req->flags = rte_cpu_to_le_16(flags);
4801         enables =
4802          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR |
4803          HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR;
4804         agg_req->enables = rte_cpu_to_le_32(enables);
4805
4806         HWRM_UNLOCK();
4807         return rc;
4808 }
4809
4810 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
4811                         struct bnxt_coal *coal, uint16_t ring_id)
4812 {
4813         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
4814         struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
4815                                                 bp->hwrm_cmd_resp_addr;
4816         int rc;
4817
4818         /* Set ring coalesce parameters only for Thor-based and Stratus (100G) NICs */
4819         if (BNXT_CHIP_THOR(bp)) {
4820                 if (bnxt_hwrm_set_coal_params_thor(bp, &req))
4821                         return -1;
4822         } else if (bnxt_stratus_device(bp)) {
4823                 bnxt_hwrm_set_coal_params(coal, &req);
4824         } else {
4825                 return 0;
4826         }
4827
4828         HWRM_PREP(&req,
4829                   HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
4830                   BNXT_USE_CHIMP_MB);
4831         req.ring_id = rte_cpu_to_le_16(ring_id);
4832         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4833         HWRM_CHECK_RESULT();
4834         HWRM_UNLOCK();
4835         return 0;
4836 }
4837
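/*
 * Editor's sketch (illustration only; bnxt_init_coal() stands in for
 * however the caller fills bnxt_coal): per-ring coalescing is applied
 * after the completion ring exists, keyed by its FW ring ID:
 *
 *	struct bnxt_coal coal;
 *
 *	bnxt_init_coal(&coal);
 *	bnxt_hwrm_set_ring_coal(bp, &coal,
 *				cpr->cp_ring_struct->fw_ring_id);
 *
 * On anything that is neither Thor-based nor Stratus the call returns
 * without touching the hardware, by design.
 */
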
4838 #define BNXT_RTE_MEMZONE_FLAG  (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
4839 int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
4840 {
4841         struct hwrm_func_backing_store_qcaps_input req = {0};
4842         struct hwrm_func_backing_store_qcaps_output *resp =
4843                 bp->hwrm_cmd_resp_addr;
4844         struct bnxt_ctx_pg_info *ctx_pg;
4845         struct bnxt_ctx_mem_info *ctx;
4846         int total_alloc_len;
4847         int rc, i, tqm_rings;
4848
4849         if (!BNXT_CHIP_THOR(bp) ||
4850             bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
4851             BNXT_VF(bp) ||
4852             bp->ctx)
4853                 return 0;
4854
4855         HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
4856         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4857         HWRM_CHECK_RESULT_SILENT();
4858
4859         total_alloc_len = sizeof(*ctx);
4860         ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len,
4861                           RTE_CACHE_LINE_SIZE);
4862         if (!ctx) {
4863                 rc = -ENOMEM;
4864                 goto ctx_err;
4865         }
4866
4867         ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
4868         ctx->qp_min_qp1_entries =
4869                 rte_le_to_cpu_16(resp->qp_min_qp1_entries);
4870         ctx->qp_max_l2_entries =
4871                 rte_le_to_cpu_16(resp->qp_max_l2_entries);
4872         ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
4873         ctx->srq_max_l2_entries =
4874                 rte_le_to_cpu_16(resp->srq_max_l2_entries);
4875         ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
4876         ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
4877         ctx->cq_max_l2_entries =
4878                 rte_le_to_cpu_16(resp->cq_max_l2_entries);
4879         ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
4880         ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
4881         ctx->vnic_max_vnic_entries =
4882                 rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
4883         ctx->vnic_max_ring_table_entries =
4884                 rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
4885         ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
4886         ctx->stat_max_entries =
4887                 rte_le_to_cpu_32(resp->stat_max_entries);
4888         ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
4889         ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
4890         ctx->tqm_min_entries_per_ring =
4891                 rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
4892         ctx->tqm_max_entries_per_ring =
4893                 rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
4894         ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
4895         if (!ctx->tqm_entries_multiple)
4896                 ctx->tqm_entries_multiple = 1;
4897         ctx->mrav_max_entries =
4898                 rte_le_to_cpu_32(resp->mrav_max_entries);
4899         ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
4900         ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
4901         ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
4902         ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
4903
4904         if (!ctx->tqm_fp_rings_count)
4905                 ctx->tqm_fp_rings_count = bp->max_q;
4906
4907         tqm_rings = ctx->tqm_fp_rings_count + 1;
4908
4909         ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
4910                             sizeof(*ctx_pg) * tqm_rings,
4911                             RTE_CACHE_LINE_SIZE);
4912         if (!ctx_pg) {
4913                 rc = -ENOMEM;
4914                 goto ctx_err;
4915         }
4916         for (i = 0; i < tqm_rings; i++, ctx_pg++)
4917                 ctx->tqm_mem[i] = ctx_pg;
4918
4919         bp->ctx = ctx;
4920 ctx_err:
4921         HWRM_UNLOCK();
4922         return rc;
4923 }
4924
4925 int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
4926 {
4927         struct hwrm_func_backing_store_cfg_input req = {0};
4928         struct hwrm_func_backing_store_cfg_output *resp =
4929                 bp->hwrm_cmd_resp_addr;
4930         struct bnxt_ctx_mem_info *ctx = bp->ctx;
4931         struct bnxt_ctx_pg_info *ctx_pg;
4932         uint32_t *num_entries;
4933         uint64_t *pg_dir;
4934         uint8_t *pg_attr;
4935         uint32_t ena;
4936         int i, rc;
4937
4938         if (!ctx)
4939                 return 0;
4940
4941         HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
4942         req.enables = rte_cpu_to_le_32(enables);
4943
4944         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
4945                 ctx_pg = &ctx->qp_mem;
4946                 req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4947                 req.qp_num_qp1_entries =
4948                         rte_cpu_to_le_16(ctx->qp_min_qp1_entries);
4949                 req.qp_num_l2_entries =
4950                         rte_cpu_to_le_16(ctx->qp_max_l2_entries);
4951                 req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);
4952                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4953                                       &req.qpc_pg_size_qpc_lvl,
4954                                       &req.qpc_page_dir);
4955         }
4956
4957         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
4958                 ctx_pg = &ctx->srq_mem;
4959                 req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4960                 req.srq_num_l2_entries =
4961                                  rte_cpu_to_le_16(ctx->srq_max_l2_entries);
4962                 req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);
4963                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4964                                       &req.srq_pg_size_srq_lvl,
4965                                       &req.srq_page_dir);
4966         }
4967
4968         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
4969                 ctx_pg = &ctx->cq_mem;
4970                 req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
4971                 req.cq_num_l2_entries =
4972                                 rte_cpu_to_le_16(ctx->cq_max_l2_entries);
4973                 req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);
4974                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4975                                       &req.cq_pg_size_cq_lvl,
4976                                       &req.cq_page_dir);
4977         }
4978
4979         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
4980                 ctx_pg = &ctx->vnic_mem;
4981                 req.vnic_num_vnic_entries =
4982                         rte_cpu_to_le_16(ctx->vnic_max_vnic_entries);
4983                 req.vnic_num_ring_table_entries =
4984                         rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);
4985                 req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);
4986                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4987                                       &req.vnic_pg_size_vnic_lvl,
4988                                       &req.vnic_page_dir);
4989         }
4990
4991         if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
4992                 ctx_pg = &ctx->stat_mem;
4993                 req.stat_num_entries = rte_cpu_to_le_32(ctx->stat_max_entries);
4994                 req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);
4995                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
4996                                       &req.stat_pg_size_stat_lvl,
4997                                       &req.stat_page_dir);
4998         }
4999
5000         req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
5001         num_entries = &req.tqm_sp_num_entries;
5002         pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;
5003         pg_dir = &req.tqm_sp_page_dir;
5004         ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
        /* One TQM_SP slot plus up to eight fast-path TQM rings */
5005         for (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
5006                 if (!(enables & ena))
5007                         continue;
5008
5011                 ctx_pg = ctx->tqm_mem[i];
                /* The per-ring num_entries request fields are 32 bits wide */
5012                 *num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5013                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
5014         }
5015
5016         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5017         HWRM_CHECK_RESULT();
5018         HWRM_UNLOCK();
5019
5020         return rc;
5021 }
5022
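/*
 * Editor's note: the TQM section of the request is laid out as parallel
 * arrays (num_entries / pg_size_lvl / page_dir, one slot for TQM_SP and
 * one per fast-path ring), and the matching ENABLES bits are contiguous.
 * That is what lets the loop above step one pointer triple and shift a
 * single bit together.  Bit-walk sketch:
 *
 *	ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
 *	for (i = 0; i < 9; i++, ena <<= 1)
 *		configured = !!(enables & ena);
 */
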
5023 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
5024 {
5025         struct hwrm_port_qstats_ext_input req = {0};
5026         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
5027         struct bnxt_pf_info *pf = bp->pf;
5028         int rc;
5029
5030         if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
5031               bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
5032                 return 0;
5033
5034         HWRM_PREP(&req, HWRM_PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
5035
5036         req.port_id = rte_cpu_to_le_16(pf->port_id);
5037         if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
5038                 req.tx_stat_host_addr =
5039                         rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map);
5040                 req.tx_stat_size =
5041                         rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
5042         }
5043         if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
5044                 req.rx_stat_host_addr =
5045                         rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map);
5046                 req.rx_stat_size =
5047                         rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
5048         }
5049         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5050
5051         if (rc) {
5052                 bp->fw_rx_port_stats_ext_size = 0;
5053                 bp->fw_tx_port_stats_ext_size = 0;
5054         } else {
5055                 bp->fw_rx_port_stats_ext_size =
5056                         rte_le_to_cpu_16(resp->rx_stat_size);
5057                 bp->fw_tx_port_stats_ext_size =
5058                         rte_le_to_cpu_16(resp->tx_stat_size);
5059         }
5060
5061         HWRM_CHECK_RESULT();
5062         HWRM_UNLOCK();
5063
5064         return rc;
5065 }
5066
5067 int
5068 bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
5069 {
5070         struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0};
5071         struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp =
5072                 bp->hwrm_cmd_resp_addr;
5073         int rc = 0;
5074
5075         HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB);
5076         req.tunnel_type = type;
5077         req.dest_fid = bp->fw_fid;
5078         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5079         HWRM_CHECK_RESULT();
5080
5081         HWRM_UNLOCK();
5082
5083         return rc;
5084 }
5085
5086 int
5087 bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
5088 {
5089         struct hwrm_cfa_redirect_tunnel_type_free_input req = {0};
5090         struct hwrm_cfa_redirect_tunnel_type_free_output *resp =
5091                 bp->hwrm_cmd_resp_addr;
5092         int rc = 0;
5093
5094         HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB);
5095         req.tunnel_type = type;
5096         req.dest_fid = bp->fw_fid;
5097         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5098         HWRM_CHECK_RESULT();
5099
5100         HWRM_UNLOCK();
5101
5102         return rc;
5103 }
5104
5105 int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
5106 {
5107         struct hwrm_cfa_redirect_query_tunnel_type_input req = {0};
5108         struct hwrm_cfa_redirect_query_tunnel_type_output *resp =
5109                 bp->hwrm_cmd_resp_addr;
5110         int rc = 0;
5111
5112         HWRM_PREP(&req, HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB);
5113         req.src_fid = bp->fw_fid;
5114         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5115         HWRM_CHECK_RESULT();
5116
5117         if (type)
5118                 *type = rte_le_to_cpu_32(resp->tunnel_mask);
5119
5120         HWRM_UNLOCK();
5121
5122         return rc;
5123 }
5124
5125 int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
5126                                    uint16_t *dst_fid)
5127 {
5128         struct hwrm_cfa_redirect_tunnel_type_info_input req = {0};
5129         struct hwrm_cfa_redirect_tunnel_type_info_output *resp =
5130                 bp->hwrm_cmd_resp_addr;
5131         int rc = 0;
5132
5133         HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
5134         req.src_fid = bp->fw_fid;
5135         req.tunnel_type = tun_type;
5136         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5137         HWRM_CHECK_RESULT();
5138
5139         if (dst_fid)
5140                 *dst_fid = rte_le_to_cpu_16(resp->dest_fid);
5141
5142         PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", rte_le_to_cpu_16(resp->dest_fid));
5143
5144         HWRM_UNLOCK();
5145
5146         return rc;
5147 }
5148
5149 int bnxt_hwrm_set_mac(struct bnxt *bp)
5150 {
5151         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5152         struct hwrm_func_vf_cfg_input req = {0};
5153         int rc = 0;
5154
5155         if (!BNXT_VF(bp))
5156                 return 0;
5157
5158         HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
5159
5160         req.enables =
5161                 rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
5162         memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
5163
5164         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5165
5166         HWRM_CHECK_RESULT();
5167
5168         HWRM_UNLOCK();
5169
5170         return rc;
5171 }
5172
5173 int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
5174 {
5175         struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
5176         struct hwrm_func_drv_if_change_input req = {0};
5177         uint32_t flags;
5178         int rc;
5179
5180         if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
5181                 return 0;
5182
5183         /* Do not issue FUNC_DRV_IF_CHANGE during reset recovery.
5184          * If we issued FUNC_DRV_IF_CHANGE with the "up" flag cleared
5185          * before FUNC_DRV_UNRGTR, the FW would reset before the
          * unregister could complete.
5186          */
5187         if (!up && (bp->flags & BNXT_FLAG_FW_RESET))
5188                 return 0;
5189
5190         HWRM_PREP(&req, HWRM_FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);
5191
5192         if (up)
5193                 req.flags =
5194                 rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP);
5195
5196         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5197
5198         HWRM_CHECK_RESULT();
5199         flags = rte_le_to_cpu_32(resp->flags);
5200         HWRM_UNLOCK();
5201
5202         if (!up)
5203                 return 0;
5204
5205         if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
5206                 PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
5207                 bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
5208         }
5209
5210         return 0;
5211 }
5212
5213 int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
5214 {
5215         struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5216         struct bnxt_error_recovery_info *info = bp->recovery_info;
5217         struct hwrm_error_recovery_qcfg_input req = {0};
5218         uint32_t flags = 0;
5219         unsigned int i;
5220         int rc;
5221
5222         /* Older FW does not have error recovery support */
5223         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5224                 return 0;
5225
5226         HWRM_PREP(&req, HWRM_ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
5227
5228         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5229
5230         HWRM_CHECK_RESULT();
5231
5232         flags = rte_le_to_cpu_32(resp->flags);
5233         if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST)
5234                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST;
5235         else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
5236                 info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU;
5237
5238         if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) &&
5239             !(bp->flags & BNXT_FLAG_KONG_MB_EN)) {
5240                 rc = -EINVAL;
5241                 goto err;
5242         }
5243
5244         /* FW returned values are in units of 100msec */
5245         info->driver_polling_freq =
5246                 rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
5247         info->master_func_wait_period =
5248                 rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
5249         info->normal_func_wait_period =
5250                 rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
5251         info->master_func_wait_period_after_reset =
5252                 rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100;
5253         info->max_bailout_time_after_reset =
5254                 rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
5255         info->status_regs[BNXT_FW_STATUS_REG] =
5256                 rte_le_to_cpu_32(resp->fw_health_status_reg);
5257         info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] =
5258                 rte_le_to_cpu_32(resp->fw_heartbeat_reg);
5259         info->status_regs[BNXT_FW_RECOVERY_CNT_REG] =
5260                 rte_le_to_cpu_32(resp->fw_reset_cnt_reg);
5261         info->status_regs[BNXT_FW_RESET_INPROG_REG] =
5262                 rte_le_to_cpu_32(resp->reset_inprogress_reg);
5263         info->reg_array_cnt =
5264                 rte_le_to_cpu_32(resp->reg_array_cnt);
5265
5266         if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) {
5267                 rc = -EINVAL;
5268                 goto err;
5269         }
5270
5271         for (i = 0; i < info->reg_array_cnt; i++) {
5272                 info->reset_reg[i] =
5273                         rte_le_to_cpu_32(resp->reset_reg[i]);
5274                 info->reset_reg_val[i] =
5275                         rte_le_to_cpu_32(resp->reset_reg_val[i]);
5276                 info->delay_after_reset[i] =
5277                         resp->delay_after_reset[i];
5278         }
5279 err:
5280         HWRM_UNLOCK();
5281
5282         /* Map the FW status registers */
5283         if (!rc)
5284                 rc = bnxt_map_fw_health_status_regs(bp);
5285
5286         if (rc) {
5287                 rte_free(bp->recovery_info);
5288                 bp->recovery_info = NULL;
5289         }
5290         return rc;
5291 }
5292
5293 int bnxt_hwrm_fw_reset(struct bnxt *bp)
5294 {
5295         struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr;
5296         struct hwrm_fw_reset_input req = {0};
5297         int rc;
5298
5299         if (!BNXT_PF(bp))
5300                 return -EOPNOTSUPP;
5301
5302         HWRM_PREP(&req, HWRM_FW_RESET, BNXT_USE_KONG(bp));
5303
5304         req.embedded_proc_type =
5305                 HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
5306         req.selfrst_status =
5307                 HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
5308         req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
5309
5310         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
5311                                     BNXT_USE_KONG(bp));
5312
5313         HWRM_CHECK_RESULT();
5314         HWRM_UNLOCK();
5315
5316         return rc;
5317 }
5318
5319 int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
5320 {
5321         struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
5322         struct hwrm_port_ts_query_input req = {0};
5323         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
5324         uint32_t flags = 0;
5325         int rc;
5326
5327         if (!ptp)
5328                 return 0;
5329
5330         HWRM_PREP(&req, HWRM_PORT_TS_QUERY, BNXT_USE_CHIMP_MB);
5331
5332         switch (path) {
5333         case BNXT_PTP_FLAGS_PATH_TX:
5334                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX;
5335                 break;
5336         case BNXT_PTP_FLAGS_PATH_RX:
5337                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX;
5338                 break;
5339         case BNXT_PTP_FLAGS_CURRENT_TIME:
5340                 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME;
5341                 break;
5342         }
5343
5344         req.flags = rte_cpu_to_le_32(flags);
5345         req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
5346
5347         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5348
5349         HWRM_CHECK_RESULT();
5350
5351         if (timestamp) {
5352                 *timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]);
5353                 *timestamp |=
5354                         (uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32;
5355         }
5356         HWRM_UNLOCK();
5357
5358         return rc;
5359 }
5360
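/*
 * Editor's note: the PTP timestamp arrives as two little-endian 32-bit
 * words; the code above stitches them as low | (high << 32).  The cast
 * before the shift is what keeps the high word from being truncated:
 *
 *	uint64_t ts;
 *
 *	ts = rte_le_to_cpu_32(resp->ptp_msg_ts[0]);
 *	ts |= (uint64_t)rte_le_to_cpu_32(resp->ptp_msg_ts[1]) << 32;
 */
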
5361 int bnxt_hwrm_cfa_counter_qcaps(struct bnxt *bp, uint16_t *max_fc)
5362 {
5363         int rc = 0;
5364
5365         struct hwrm_cfa_counter_qcaps_input req = {0};
5366         struct hwrm_cfa_counter_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5367
5368         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5369                 PMD_DRV_LOG(DEBUG,
5370                             "Not a PF or trusted VF. Command not supported\n");
5371                 return 0;
5372         }
5373
5374         HWRM_PREP(&req, HWRM_CFA_COUNTER_QCAPS, BNXT_USE_KONG(bp));
5375         req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5376         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5377
5378         HWRM_CHECK_RESULT();
5379         if (max_fc)
5380                 *max_fc = rte_le_to_cpu_16(resp->max_rx_fc);
5381         HWRM_UNLOCK();
5382
5383         return 0;
5384 }
5385
5386 int bnxt_hwrm_ctx_rgtr(struct bnxt *bp, rte_iova_t dma_addr, uint16_t *ctx_id)
5387 {
5388         int rc = 0;
5389         struct hwrm_cfa_ctx_mem_rgtr_input req = {.req_type = 0 };
5390         struct hwrm_cfa_ctx_mem_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
5391
5392         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5393                 PMD_DRV_LOG(DEBUG,
5394                             "Not a PF or trusted VF. Command not supported\n");
5395                 return 0;
5396         }
5397
5398         HWRM_PREP(&req, HWRM_CFA_CTX_MEM_RGTR, BNXT_USE_KONG(bp));
5399
5400         req.page_level = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0;
5401         req.page_size = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_2M;
5402         req.page_dir = rte_cpu_to_le_64(dma_addr);
5403
5404         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5405
5406         HWRM_CHECK_RESULT();
5407         if (ctx_id) {
5408                 *ctx_id  = rte_le_to_cpu_16(resp->ctx_id);
5409                 PMD_DRV_LOG(DEBUG, "ctx_id = %d\n", *ctx_id);
5410         }
5411         HWRM_UNLOCK();
5412
5413         return 0;
5414 }
5415
5416 int bnxt_hwrm_ctx_unrgtr(struct bnxt *bp, uint16_t ctx_id)
5417 {
5418         int rc = 0;
5419         struct hwrm_cfa_ctx_mem_unrgtr_input req = {.req_type = 0 };
5420         struct hwrm_cfa_ctx_mem_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
5421
5422         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5423                 PMD_DRV_LOG(DEBUG,
5424                             "Not a PF or trusted VF. Command not supported\n");
5425                 return 0;
5426         }
5427
5428         HWRM_PREP(&req, HWRM_CFA_CTX_MEM_UNRGTR, BNXT_USE_KONG(bp));
5429
5430         req.ctx_id = rte_cpu_to_le_16(ctx_id);
5431
5432         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5433
5434         HWRM_CHECK_RESULT();
5435         HWRM_UNLOCK();
5436
5437         return rc;
5438 }
5439
5440 int bnxt_hwrm_cfa_counter_cfg(struct bnxt *bp, enum bnxt_flow_dir dir,
5441                               uint16_t cntr, uint16_t ctx_id,
5442                               uint32_t num_entries, bool enable)
5443 {
5444         struct hwrm_cfa_counter_cfg_input req = {0};
5445         struct hwrm_cfa_counter_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5446         uint16_t flags = 0;
5447         int rc;
5448
5449         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5450                 PMD_DRV_LOG(DEBUG,
5451                             "Not a PF or trusted VF. Command not supported\n");
5452                 return 0;
5453         }
5454
5455         HWRM_PREP(&req, HWRM_CFA_COUNTER_CFG, BNXT_USE_KONG(bp));
5456
5457         req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5458         req.counter_type = rte_cpu_to_le_16(cntr);
5459         flags = enable ? HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE :
5460                 HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_DISABLE;
5461         flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL;
5462         if (dir == BNXT_DIR_RX)
5463                 flags |=  HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX;
5464         else if (dir == BNXT_DIR_TX)
5465                 flags |=  HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_TX;
5466         req.flags = rte_cpu_to_le_16(flags);
5467         req.ctx_id =  rte_cpu_to_le_16(ctx_id);
5468         req.num_entries = rte_cpu_to_le_32(num_entries);
5469
5470         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5471         HWRM_CHECK_RESULT();
5472         HWRM_UNLOCK();
5473
5474         return 0;
5475 }
5476
5477 int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp,
5478                                  enum bnxt_flow_dir dir,
5479                                  uint16_t cntr,
5480                                  uint16_t num_entries)
5481 {
5482         struct hwrm_cfa_counter_qstats_output *resp = bp->hwrm_cmd_resp_addr;
5483         struct hwrm_cfa_counter_qstats_input req = {0};
5484         uint16_t flow_ctx_id = 0;
5485         uint16_t flags = 0;
5486         int rc = 0;
5487
5488         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5489                 PMD_DRV_LOG(DEBUG,
5490                             "Not a PF or trusted VF. Command not supported\n");
5491                 return 0;
5492         }
5493
5494         if (dir == BNXT_DIR_RX) {
5495                 flow_ctx_id = bp->flow_stat->rx_fc_in_tbl.ctx_id;
5496                 flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX;
5497         } else if (dir == BNXT_DIR_TX) {
5498                 flow_ctx_id = bp->flow_stat->tx_fc_in_tbl.ctx_id;
5499                 flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_TX;
5500         }
5501
5502         HWRM_PREP(&req, HWRM_CFA_COUNTER_QSTATS, BNXT_USE_KONG(bp));
5503         req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5504         req.counter_type = rte_cpu_to_le_16(cntr);
5505         req.input_flow_ctx_id = rte_cpu_to_le_16(flow_ctx_id);
5506         req.num_entries = rte_cpu_to_le_16(num_entries);
5507         req.flags = rte_cpu_to_le_16(flags);
5508         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5509
5510         HWRM_CHECK_RESULT();
5511         HWRM_UNLOCK();
5512
5513         return 0;
5514 }
5515
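/*
 * Editor's sketch (not upstream code; counter_type, tbl_dma_addr and
 * num_entries stand in for caller-provided state): CFA flow counters are
 * consumed in three steps -- register the DMA'd counter table, enable the
 * counter context, then pull stats:
 *
 *	uint16_t ctx_id;
 *
 *	bnxt_hwrm_ctx_rgtr(bp, tbl_dma_addr, &ctx_id);
 *	bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, counter_type,
 *				  ctx_id, num_entries, true);
 *	bnxt_hwrm_cfa_counter_qstats(bp, BNXT_DIR_RX, counter_type,
 *				     num_entries);
 *
 * Note bnxt_hwrm_cfa_counter_qstats() reads the ctx_id back out of
 * bp->flow_stat, so the registered table must be recorded there first.
 */
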
5516 int bnxt_hwrm_cfa_vfr_alloc(struct bnxt *bp, uint16_t vf_idx)
5517 {
5518         struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5519         struct hwrm_cfa_vfr_alloc_input req = {0};
5520         int rc;
5521
5522         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5523                 PMD_DRV_LOG(DEBUG,
5524                             "Not a PF or trusted VF. Command not supported\n");
5525                 return 0;
5526         }
5527
5528         HWRM_PREP(&req, HWRM_CFA_VFR_ALLOC, BNXT_USE_CHIMP_MB);
5529         req.vf_id = rte_cpu_to_le_16(vf_idx);
5530         snprintf(req.vfr_name, sizeof(req.vfr_name), "%svfr%d",
5531                  bp->eth_dev->data->name, vf_idx);
5532
5533         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5534         HWRM_CHECK_RESULT();
5535
5536         HWRM_UNLOCK();
5537         PMD_DRV_LOG(DEBUG, "VFR %d allocated\n", vf_idx);
5538         return rc;
5539 }
5540
5541 int bnxt_hwrm_cfa_vfr_free(struct bnxt *bp, uint16_t vf_idx)
5542 {
5543         struct hwrm_cfa_vfr_free_output *resp = bp->hwrm_cmd_resp_addr;
5544         struct hwrm_cfa_vfr_free_input req = {0};
5545         int rc;
5546
5547         if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5548                 PMD_DRV_LOG(DEBUG,
5549                             "Not a PF or trusted VF. Command not supported\n");
5550                 return 0;
5551         }
5552
5553         HWRM_PREP(&req, HWRM_CFA_VFR_FREE, BNXT_USE_CHIMP_MB);
5554         req.vf_id = rte_cpu_to_le_16(vf_idx);
5555         snprintf(req.vfr_name, sizeof(req.vfr_name), "%svfr%d",
5556                  bp->eth_dev->data->name, vf_idx);
5557
5558         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5559         HWRM_CHECK_RESULT();
5560         HWRM_UNLOCK();
5561         PMD_DRV_LOG(DEBUG, "VFR %d freed\n", vf_idx);
5562         return rc;
5563 }