/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT		10000
#define HWRM_VERSION_1_9_1		0x10901

struct bnxt_plcmodes_cfg {
	uint32_t	flags;
	uint16_t	jumbo_thresh;
	uint16_t	hds_offset;
	uint16_t	hds_threshold;
};

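/*
 * Return the smallest supported page-size exponent (log2) that covers
 * "size"; e.g. page_getenum(3000) == 12, so page_roundup(3000) == 4096.
 */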
static int page_getenum(size_t size)
{
	if (size <= 1 << 4)
		return 4;
	if (size <= 1 << 12)
		return 12;
	if (size <= 1 << 13)
		return 13;
	if (size <= 1 << 16)
		return 16;
	if (size <= 1 << 21)
		return 21;
	if (size <= 1 << 22)
		return 22;
	if (size <= 1 << 30)
		return 30;
	PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
	return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
	return 1 << page_getenum(size);
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * command was rejected by the ChiMP firmware.
 */

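/*
 * Write an HWRM request through the BAR0 communication channel, ring the
 * doorbell, then poll the DMA-mapped response buffer until the firmware
 * sets the valid byte at the end of the response (or the HWRM_CMD_TIMEOUT
 * poll budget runs out). Callers hold bp->hwrm_lock via HWRM_PREP().
 */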
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
					uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;
	uint16_t max_req_len = bp->max_req_len;
	struct hwrm_short_input short_input = { 0 };

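	/*
	 * Short command format: only the 16-byte hwrm_short_input descriptor
	 * goes through BAR0; the firmware fetches the full request via DMA
	 * from hwrm_short_cmd_req_dma_addr.
	 */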
	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

		memset(short_cmd_req, 0, bp->max_req_len);
		memcpy(short_cmd_req, req, msg_len);

		short_input.req_type = rte_cpu_to_le_16(req->req_type);
		short_input.signature = rte_cpu_to_le_16(
					HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
		short_input.size = rte_cpu_to_le_16(msg_len);
		short_input.req_addr =
			rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

		data = (uint32_t *)&short_input;
		msg_len = sizeof(short_input);

		/* Sync memory write before updating doorbell */
		rte_wmb();

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(*data, bar);
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(0, bar);
	}

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + 0x100;
	rte_write32(1, bar);

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		rte_rmb();
		if (resp->resp_len && resp->resp_len <=
				bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(600);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
			req->req_type);
		goto err_ret;
	}
	return 0;

err_ret:
	return -1;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock, and does initial processing.
 *
 * HWRM_CHECK_RESULT() returns from the calling function on failure, and it
 * releases the spinlock before returning; on success it leaves the lock
 * held. If the regular int return codes are not used by the function,
 * HWRM_CHECK_RESULT() should not be used directly; rather it should be
 * copied and modified to suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type) do { \
	rte_spinlock_lock(&bp->hwrm_lock); \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(-1); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT() do {\
	if (rc) { \
		PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
	if (resp->error_code) { \
		rc = rte_le_to_cpu_16(resp->error_code); \
		if (resp->resp_len >= 16) { \
			struct hwrm_err_output *tmp_hwrm_err_op = \
						(void *)resp; \
			PMD_DRV_LOG(ERR, \
				"error %d:%d:%08x:%04x\n", \
				rc, tmp_hwrm_err_op->cmd_err, \
				rte_le_to_cpu_32(\
					tmp_hwrm_err_op->opaque_0), \
				rte_le_to_cpu_16(\
					tmp_hwrm_err_op->opaque_1)); \
		} else { \
			PMD_DRV_LOG(ERR, "error %d\n", rc); \
		} \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
} while (0)

#define HWRM_UNLOCK()		rte_spinlock_unlock(&bp->hwrm_lock)
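
/*
 * Typical command sequence built from the macros above (a minimal sketch;
 * the request/response types vary per command):
 *
 *	struct hwrm_func_reset_input req = {.req_type = 0 };
 *	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, FUNC_RESET);	// takes the lock, fills the header
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT();		// returns (unlocked) on error
 *	// ... read fields from "resp" while the lock is still held ...
 *	HWRM_UNLOCK();
 */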

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mask = 0;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
				 struct bnxt_vnic_info *vnic,
				 uint16_t vlan_count,
				 struct bnxt_vlan_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t mask = 0;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return rc;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/* FIXME add multicast flag, when multicast adding options is supported
	 * by ethtool.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_BCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
	if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	if (vnic->flags & BNXT_VNIC_INFO_MCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
	if (vnic->mc_addr_cnt) {
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
		req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
		req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
	}
	if (vlan_table) {
		if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
		req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
			 rte_mem_virt2iova(vlan_table));
		req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
	}
	req.mask = rte_cpu_to_le_32(mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
			uint16_t vlan_count,
			struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
	struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
						bp->hwrm_cmd_resp_addr;

	/*
	 * Older HWRM versions did not support this command, and the set_rx_mask
	 * list was used for anti-spoof. In 1.8.0, the TX path configuration was
	 * removed from set_rx_mask call, and this command was added.
	 *
	 * This command is also present from 1.7.8.11 and higher,
	 * as well as 1.7.8.0
	 */
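	/*
	 * bp->fw_ver is packed by bnxt_hwrm_ver_get() as
	 * (maj << 24) | (min << 16) | (bld << 8) | rsvd, so the checks below
	 * compare against 1.8.0.0, 1.7.8.0 and 1.7.8.11 respectively.
	 */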
	if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
		if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
			if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
					(11)))
				return 0;
		}
	}
	HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
	req.fid = rte_cpu_to_le_16(fid);

	req.vlan_tag_mask_tbl_addr =
		rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
			   struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_l2_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(req, CFA_L2_FILTER_FREE);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_l2_filter_id = UINT64_MAX;

	return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
			 uint16_t dst_id,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		    &dev_conf->rx_adv_conf.vmdq_rx_conf;
	uint32_t enables = 0;
	uint16_t j = dst_id - 1;

	/* TODO: Is there a better way to add VLANs to each VNIC in case of
	 * VMDQ?
	 */
	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
	    conf->pool_map[j].pools & (1UL << j)) {
		PMD_DRV_LOG(DEBUG,
			"Add vlan %u to vmdq pool %u\n",
			conf->pool_map[j].vlan_id, j);

		filter->l2_ivlan = conf->pool_map[j].vlan_id;
		filter->enables |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
	}

	if (filter->fw_l2_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_l2_filter(bp, filter);

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
	      HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
		req.l2_ivlan = filter->l2_ivlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
		req.l2_ivlan_mask = filter->l2_ivlan_mask;
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
		req.src_id = rte_cpu_to_le_32(filter->src_id);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
		req.src_type = filter->src_type;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
	struct hwrm_port_mac_cfg_input req = {.req_type = 0};
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	uint32_t flags = 0;
	int rc;

	if (!ptp)
		return 0;

	HWRM_PREP(req, PORT_MAC_CFG);

	if (ptp->rx_filter)
		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
	else
		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
	if (ptp->tx_tstamp_en)
		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
	else
		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
	req.flags = rte_cpu_to_le_32(flags);
	req.enables =
	rte_cpu_to_le_32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
	req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_UNLOCK();

	return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
	struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*	if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
	if (ptp)
		return 0;

	HWRM_PREP(req, PORT_MAC_PTP_QCFG);

	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS)) {
		HWRM_UNLOCK();
		return 0;
	}

	ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
	if (!ptp) {
		HWRM_UNLOCK();
		return -ENOMEM;
	}

	ptp->rx_regs[BNXT_PTP_RX_TS_L] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
	ptp->rx_regs[BNXT_PTP_RX_TS_H] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
	ptp->rx_regs[BNXT_PTP_RX_SEQ] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
	ptp->rx_regs[BNXT_PTP_RX_FIFO] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
	ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
	ptp->tx_regs[BNXT_PTP_TX_TS_L] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
	ptp->tx_regs[BNXT_PTP_TX_TS_H] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
	ptp->tx_regs[BNXT_PTP_TX_SEQ] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
	ptp->tx_regs[BNXT_PTP_TX_FIFO] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

	ptp->bp = bp;
	bp->ptp_cfg = ptp;

	HWRM_UNLOCK();

	return 0;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t new_max_vfs;
	uint32_t flags;
	int i;

	HWRM_PREP(req, FUNC_QCAPS);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	flags = rte_le_to_cpu_32(resp->flags);
	if (BNXT_PF(bp)) {
		bp->pf.port_id = resp->port_id;
		bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		new_max_vfs = bp->pdev->max_vfs;
		if (new_max_vfs != bp->pf.max_vfs) {
			if (bp->pf.vf_info)
				rte_free(bp->pf.vf_info);
			bp->pf.vf_info = rte_malloc("bnxt_vf_info",
			    sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
			bp->pf.max_vfs = new_max_vfs;
			for (i = 0; i < new_max_vfs; i++) {
				bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
				bp->pf.vf_info[i].vlan_table =
					rte_zmalloc("VF VLAN table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_table == NULL)
					PMD_DRV_LOG(ERR,
					"Fail to alloc VLAN table for VF %d\n",
					i);
				else
					rte_mem_lock_page(
						bp->pf.vf_info[i].vlan_table);
				bp->pf.vf_info[i].vlan_as_table =
					rte_zmalloc("VF VLAN AS table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_as_table == NULL)
					PMD_DRV_LOG(ERR,
					"Alloc VLAN AS table for VF %d fail\n",
					i);
				else
					rte_mem_lock_page(
					       bp->pf.vf_info[i].vlan_as_table);
				STAILQ_INIT(&bp->pf.vf_info[i].filter);
			}
		}
	}

	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
	memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	if (BNXT_PF(bp)) {
		if (bp->pf.max_vfs)
			bp->max_vnics = 1;
		else
			bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	} else {
		bp->max_vnics = 1;
	}
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	if (BNXT_PF(bp))
		bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);

	HWRM_UNLOCK();

	/* Query the PTP config after the lock is dropped;
	 * bnxt_hwrm_ptp_qcfg() sends its own HWRM command and takes the
	 * lock itself, so holding it here would deadlock.
	 */
	if (BNXT_PF(bp) &&
	    (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED)) {
		bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
		PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
		bnxt_hwrm_ptp_qcfg(bp);
	}

	return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
	int rc;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	if (BNXT_PF(bp)) {
		req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
		memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
		       RTE_MIN(sizeof(req.vf_req_fwd),
			       sizeof(bp->pf.vf_req_fwd)));
	}

	req.async_event_fwd[0] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
				 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
	req.async_event_fwd[1] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
				 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->flags |= BNXT_FLAG_REGISTERED;

	return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t my_version;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];
	uint32_t dev_caps_cfg;

	bp->max_req_len = HWRM_MAX_REQ_LEN;
	HWRM_PREP(req, VER_GET);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj, resp->hwrm_intf_min,
		resp->hwrm_intf_upd,
		resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
	bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
			(resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
	PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

	my_version = HWRM_VERSION_MAJOR << 16;
	my_version |= HWRM_VERSION_MINOR << 8;
	my_version |= HWRM_VERSION_UPDATE;

	fw_version = resp->hwrm_intf_maj << 16;
	fw_version |= resp->hwrm_intf_min << 8;
	fw_version |= resp->hwrm_intf_upd;
	bp->hwrm_spec_code = fw_version;

	if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
		PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
		rc = -EINVAL;
		goto error;
	}

	if (my_version != fw_version) {
		PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
		if (my_version < fw_version) {
			PMD_DRV_LOG(INFO,
				"Firmware API version is newer than driver.\n");
			PMD_DRV_LOG(INFO,
				"The driver may be missing features.\n");
		} else {
			PMD_DRV_LOG(INFO,
				"Firmware API version is older than driver.\n");
			PMD_DRV_LOG(INFO,
				"Not all driver features may be functional.\n");
		}
	}

	if (bp->max_req_len > rte_le_to_cpu_16(resp->max_req_win_len)) {
		PMD_DRV_LOG(ERR, "Unsupported request length\n");
		rc = -EINVAL;
	}
	bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
	max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
	dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

	/* Format the allocation name here so "type" is also initialized for
	 * the short command buffer allocation below.
	 */
	sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
		bp->pdev->addr.domain, bp->pdev->addr.bus,
		bp->pdev->addr.devid, bp->pdev->addr.function);

	if (bp->max_resp_len != max_resp_len) {

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
		bp->hwrm_cmd_resp_dma_addr =
			rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
		if (bp->hwrm_cmd_resp_dma_addr == 0) {
			PMD_DRV_LOG(ERR,
			"Unable to map response buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}
		bp->max_resp_len = max_resp_len;
	}

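	/*
	 * Switch to the short command format only when the firmware reports
	 * it as both supported and required; the buffer allocated here is
	 * DMA-mapped and handed to the firmware via hwrm_short_input.req_addr.
	 */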
	if ((dev_caps_cfg &
		HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg &
	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
		PMD_DRV_LOG(DEBUG, "Short command supported\n");

		rte_free(bp->hwrm_short_cmd_req_addr);

		bp->hwrm_short_cmd_req_addr = rte_malloc(type,
							bp->max_req_len, 0);
		if (bp->hwrm_short_cmd_req_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
		bp->hwrm_short_cmd_req_dma_addr =
			rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
		if (bp->hwrm_short_cmd_req_dma_addr == 0) {
			rte_free(bp->hwrm_short_cmd_req_addr);
			PMD_DRV_LOG(ERR,
				"Unable to map buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}

		bp->flags |= BNXT_FLAG_SHORT_CMD;
	}

error:
	HWRM_UNLOCK();
	return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
	int rc;
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR);
	req.flags = flags;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->flags &= ~BNXT_FLAG_REGISTERED;

	return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG);

	if (conf->link_up) {
		/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
		if (bp->link_info.auto_mode && conf->link_speed) {
			req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
			PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
		}

		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			/* No speeds specified. Enable AutoNeg - all speeds */
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
		}
		/* AutoNeg - Advertise speeds specified. */
		if (conf->auto_link_speed_mask &&
		    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
			req.auto_link_speed_mask =
				conf->auto_link_speed_mask;
			enables |=
			HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
		}

		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
		PMD_DRV_LOG(INFO, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	link_info->phy_link_status = resp->link;
	link_info->link_up =
		(link_info->phy_link_status ==
		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	link_info->duplex = resp->duplex_cfg;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->phy_type = resp->phy_type;
	link_info->media_type = resp->media_type;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	HWRM_UNLOCK();

	PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
	PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
	PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
	PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
	PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
		    link_info->auto_link_speed_mask);
	PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
		    link_info->force_link_speed);

	return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int i;

	HWRM_PREP(req, QUEUE_QPORTCFG);

	req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
	/* HWRM Version >= 1.9.1 */
	if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
		req.drv_qmap_cap =
			HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
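/*
 * GET_QUEUE_INFO(x) token-pastes the index, so GET_QUEUE_INFO(0) expands
 * into reads of resp->queue_id0 and resp->queue_id0_service_profile.
 */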

	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	HWRM_UNLOCK();

	if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
		bp->tx_cosq_id = bp->cos_queue[0].id;
	} else {
		/* iterate and find the COSq profile to use for Tx */
		for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
			if (bp->cos_queue[i].profile ==
				HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
				bp->tx_cosq_id = bp->cos_queue[i].id;
				break;
			}
		}
	}
	PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);

	return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
	int rc = 0;
	uint32_t enables = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);
	req.length = rte_cpu_to_le_32(ring->ring_size);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		if (stats_ctx_id != INVALID_STATS_CTX_ID)
			enables |=
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		break;
	default:
		PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
			ring_type);
		HWRM_UNLOCK();
		return -1;
	}
	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			PMD_DRV_LOG(ERR,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			PMD_DRV_LOG(ERR,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			PMD_DRV_LOG(ERR,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		default:
			PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	HWRM_UNLOCK();
	return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		HWRM_UNLOCK();

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
				rc);
			return rc;
		default:
			PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	HWRM_UNLOCK();
	return 0;
}

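/*
 * A ring group ties a completion ring (cr), an Rx ring (rr), an aggregation
 * ring (ar) and a statistics context (sc) together under one firmware group
 * id, mirroring the bp->grp_info[] bookkeeping.
 */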
1064 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1065 {
1066         int rc = 0;
1067         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1068         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1069
1070         HWRM_PREP(req, RING_GRP_ALLOC);
1071
1072         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1073         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1074         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1075         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1076
1077         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1078
1079         HWRM_CHECK_RESULT();
1080
1081         bp->grp_info[idx].fw_grp_id =
1082             rte_le_to_cpu_16(resp->ring_group_id);
1083
1084         HWRM_UNLOCK();
1085
1086         return rc;
1087 }
1088
1089 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1090 {
1091         int rc;
1092         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1093         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1094
1095         HWRM_PREP(req, RING_GRP_FREE);
1096
1097         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1098
1099         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1100
1101         HWRM_CHECK_RESULT();
1102         HWRM_UNLOCK();
1103
1104         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1105         return rc;
1106 }
1107
1108 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1109 {
1110         int rc = 0;
1111         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1112         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1113
1114         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1115                 return rc;
1116
1117         HWRM_PREP(req, STAT_CTX_CLR_STATS);
1118
1119         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1120
1121         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1122
1123         HWRM_CHECK_RESULT();
1124         HWRM_UNLOCK();
1125
1126         return rc;
1127 }
1128
1129 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1130                                 unsigned int idx __rte_unused)
1131 {
1132         int rc;
1133         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1134         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1135
1136         HWRM_PREP(req, STAT_CTX_ALLOC);
1137
1138         req.update_period_ms = rte_cpu_to_le_32(0);
1139
1140         req.stats_dma_addr =
1141             rte_cpu_to_le_64(cpr->hw_stats_map);
1142
1143         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1144
1145         HWRM_CHECK_RESULT();
1146
1147         cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
1148
1149         HWRM_UNLOCK();
1150
1151         return rc;
1152 }
1153
1154 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1155                                 unsigned int idx __rte_unused)
1156 {
1157         int rc;
1158         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1159         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1160
1161         HWRM_PREP(req, STAT_CTX_FREE);
1162
1163         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1164
1165         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1166
1167         HWRM_CHECK_RESULT();
1168         HWRM_UNLOCK();
1169
1170         return rc;
1171 }
1172
1173 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1174 {
1175         int rc = 0, i, j;
1176         struct hwrm_vnic_alloc_input req = { 0 };
1177         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1178
1179         /* map ring groups to this vnic */
1180         PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1181                 vnic->start_grp_id, vnic->end_grp_id);
1182         for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
1183                 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1184         vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1185         vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1186         vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1187         vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1188         vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1189                                 ETHER_CRC_LEN + VLAN_TAG_SIZE;
1190         HWRM_PREP(req, VNIC_ALLOC);
1191
1192         if (vnic->func_default)
1193                 req.flags =
1194                         rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1195         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1196
1197         HWRM_CHECK_RESULT();
1198
1199         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1200         HWRM_UNLOCK();
1201         PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1202         return rc;
1203 }
1204
1205 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1206                                         struct bnxt_vnic_info *vnic,
1207                                         struct bnxt_plcmodes_cfg *pmode)
1208 {
1209         int rc = 0;
1210         struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1211         struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1212
1213         HWRM_PREP(req, VNIC_PLCMODES_QCFG);
1214
1215         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1216
1217         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1218
1219         HWRM_CHECK_RESULT();
1220
1221         pmode->flags = rte_le_to_cpu_32(resp->flags);
1222         /* dflt_vnic bit doesn't exist in the _cfg command */
1223         pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1224         pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1225         pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1226         pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1227
1228         HWRM_UNLOCK();
1229
1230         return rc;
1231 }
1232
1233 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1234                                        struct bnxt_vnic_info *vnic,
1235                                        struct bnxt_plcmodes_cfg *pmode)
1236 {
1237         int rc = 0;
1238         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1239         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1240
1241         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1242
1243         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1244         req.flags = rte_cpu_to_le_32(pmode->flags);
1245         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1246         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1247         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1248         req.enables = rte_cpu_to_le_32(
1249             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1250             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1251             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1252         );
1253
1254         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1255
1256         HWRM_CHECK_RESULT();
1257         HWRM_UNLOCK();
1258
1259         return rc;
1260 }
1261
1262 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1263 {
1264         int rc = 0;
1265         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1266         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1267         uint32_t ctx_enable_flag = 0;
1268         struct bnxt_plcmodes_cfg pmodes;
1269
1270         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1271                 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1272                 return rc;
1273         }
1274
1275         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1276         if (rc)
1277                 return rc;
1278
1279         HWRM_PREP(req, VNIC_CFG);
1280
1281         /* Only RSS support for now TBD: COS & LB */
1282         req.enables =
1283             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
1284         if (vnic->lb_rule != 0xffff)
1285                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1286         if (vnic->cos_rule != 0xffff)
1287                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1288         if (vnic->rss_rule != 0xffff) {
1289                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1290                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1291         }
1292         req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1293         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1294         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1295         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1296         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1297         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1298         req.mru = rte_cpu_to_le_16(vnic->mru);
1299         if (vnic->func_default)
1300                 req.flags |=
1301                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1302         if (vnic->vlan_strip)
1303                 req.flags |=
1304                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1305         if (vnic->bd_stall)
1306                 req.flags |=
1307                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1308         if (vnic->roce_dual)
1309                 req.flags |= rte_cpu_to_le_32(
1310                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1311         if (vnic->roce_only)
1312                 req.flags |= rte_cpu_to_le_32(
1313                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1314         if (vnic->rss_dflt_cr)
1315                 req.flags |= rte_cpu_to_le_32(
1316                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1317
1318         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1319
1320         HWRM_CHECK_RESULT();
1321         HWRM_UNLOCK();
1322
1323         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1324
1325         return rc;
1326 }
1327
1328 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1329                 int16_t fw_vf_id)
1330 {
1331         int rc = 0;
1332         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1333         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1334
1335         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1336                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1337                 return rc;
1338         }
1339         HWRM_PREP(req, VNIC_QCFG);
1340
1341         req.enables =
1342                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1343         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1344         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1345
1346         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1347
1348         HWRM_CHECK_RESULT();
1349
1350         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1351         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1352         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1353         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1354         vnic->mru = rte_le_to_cpu_16(resp->mru);
1355         vnic->func_default = rte_le_to_cpu_32(
1356                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1357         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1358                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1359         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1360                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1361         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1362                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1363         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1364                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1365         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1366                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1367
1368         HWRM_UNLOCK();
1369
1370         return rc;
1371 }
1372
1373 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1374 {
1375         int rc = 0;
1376         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1377         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1378                                                 bp->hwrm_cmd_resp_addr;
1379
1380         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
1381
1382         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1383
1384         HWRM_CHECK_RESULT();
1385
1386         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1387         HWRM_UNLOCK();
1388         PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1389
1390         return rc;
1391 }
1392
1393 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1394 {
1395         int rc = 0;
1396         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1397         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1398                                                 bp->hwrm_cmd_resp_addr;
1399
1400         if (vnic->rss_rule == INVALID_HW_RING_ID) {
1401                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1402                 return rc;
1403         }
1404         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
1405
1406         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1407
1408         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1409
1410         HWRM_CHECK_RESULT();
1411         HWRM_UNLOCK();
1412
1413         vnic->rss_rule = INVALID_HW_RING_ID;
1414
1415         return rc;
1416 }
1417
1418 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1419 {
1420         int rc = 0;
1421         struct hwrm_vnic_free_input req = {.req_type = 0 };
1422         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1423
1424         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1425                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1426                 return rc;
1427         }
1428
1429         HWRM_PREP(req, VNIC_FREE);
1430
1431         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1432
1433         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1434
1435         HWRM_CHECK_RESULT();
1436         HWRM_UNLOCK();
1437
1438         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1439         return rc;
1440 }
1441
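/*
 * Program the RSS configuration of a VNIC: the hash type bitmap plus the
 * DMA addresses of the indirection (ring group) table and the hash key,
 * both of which the driver keeps in the vnic struct.  A typical call
 * order, as a rough sketch (error handling omitted):
 *
 *     rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);    /" obtains rss_rule "/
 *     if (!rc)
 *             rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
 */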
1442 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1443                            struct bnxt_vnic_info *vnic)
1444 {
1445         int rc = 0;
1446         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1447         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1448
1449         HWRM_PREP(req, VNIC_RSS_CFG);
1450
1451         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1452
1453         req.ring_grp_tbl_addr =
1454             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1455         req.hash_key_tbl_addr =
1456             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1457         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1458
1459         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1460
1461         HWRM_CHECK_RESULT();
1462         HWRM_UNLOCK();
1463
1464         return rc;
1465 }
1466
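/*
 * Configure jumbo placement for a VNIC.  The jumbo threshold is derived
 * from the data room of the first RX queue's mbuf pool minus the mbuf
 * headroom, so frames that do not fit in a single buffer are placed into
 * aggregation buffers.
 */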
1467 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1468                         struct bnxt_vnic_info *vnic)
1469 {
1470         int rc = 0;
1471         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1472         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1473         uint16_t size;
1474
1475         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1476
1477         req.flags = rte_cpu_to_le_32(
1478                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1479
1480         req.enables = rte_cpu_to_le_32(
1481                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1482
1483         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1484         size -= RTE_PKTMBUF_HEADROOM;
1485
1486         req.jumbo_thresh = rte_cpu_to_le_16(size);
1487         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1488
1489         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1490
1491         HWRM_CHECK_RESULT();
1492         HWRM_UNLOCK();
1493
1494         return rc;
1495 }
1496
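/*
 * Enable or disable TPA (hardware receive aggregation, i.e. LRO/GRO) on
 * a VNIC.  When enabling, the request also bounds the aggregation with
 * limits on segments and aggregations per completion and a 512-byte
 * minimum aggregation length.
 */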
1497 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1498                         struct bnxt_vnic_info *vnic, bool enable)
1499 {
1500         int rc = 0;
1501         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1502         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1503
1504         HWRM_PREP(req, VNIC_TPA_CFG);
1505
1506         if (enable) {
1507                 req.enables = rte_cpu_to_le_32(
1508                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1509                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1510                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1511                 req.flags = rte_cpu_to_le_32(
1512                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1513                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1514                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1515                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1516                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1517                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1518                 req.max_agg_segs = rte_cpu_to_le_16(5);
1519                 req.max_aggs =
1520                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1521                 req.min_agg_len = rte_cpu_to_le_32(512);
1522         }
1523         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1524
1525         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1526
1527         HWRM_CHECK_RESULT();
1528         HWRM_UNLOCK();
1529
1530         return rc;
1531 }
1532
1533 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1534 {
1535         struct hwrm_func_cfg_input req = {0};
1536         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1537         int rc;
1538
1539         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1540         req.enables = rte_cpu_to_le_32(
1541                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1542         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1543         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1544
1545         HWRM_PREP(req, FUNC_CFG);
1546
1547         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1548         HWRM_CHECK_RESULT();
1549         HWRM_UNLOCK();
1550
1551         bp->pf.vf_info[vf].random_mac = false;
1552
1553         return rc;
1554 }
1555
1556 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1557                                   uint64_t *dropped)
1558 {
1559         int rc = 0;
1560         struct hwrm_func_qstats_input req = {.req_type = 0};
1561         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1562
1563         HWRM_PREP(req, FUNC_QSTATS);
1564
1565         req.fid = rte_cpu_to_le_16(fid);
1566
1567         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1568
1569         HWRM_CHECK_RESULT();
1570
1571         if (dropped)
1572                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1573
1574         HWRM_UNLOCK();
1575
1576         return rc;
1577 }
1578
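/*
 * Fetch per-function statistics and fold them into rte_eth_stats:
 * unicast, multicast and broadcast counters are summed into the packet
 * and byte totals, while the error and drop counters map to ierrors,
 * oerrors and imissed respectively.
 */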
1579 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1580                           struct rte_eth_stats *stats)
1581 {
1582         int rc = 0;
1583         struct hwrm_func_qstats_input req = {.req_type = 0};
1584         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1585
1586         HWRM_PREP(req, FUNC_QSTATS);
1587
1588         req.fid = rte_cpu_to_le_16(fid);
1589
1590         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1591
1592         HWRM_CHECK_RESULT();
1593
1594         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1595         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1596         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1597         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1598         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1599         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1600
1601         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1602         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1603         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1604         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1605         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1606         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1607
1608         stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
1609         stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
1610
1611         stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
1612
1613         HWRM_UNLOCK();
1614
1615         return rc;
1616 }
1617
1618 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1619 {
1620         int rc = 0;
1621         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1622         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1623
1624         HWRM_PREP(req, FUNC_CLR_STATS);
1625
1626         req.fid = rte_cpu_to_le_16(fid);
1627
1628         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1629
1630         HWRM_CHECK_RESULT();
1631         HWRM_UNLOCK();
1632
1633         return rc;
1634 }
1635
1636 /*
1637  * HWRM utility functions
1638  */
1639
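/*
 * The helpers below iterate over all completion rings using a single
 * index: entries [0, rx_cp_nr_rings) address the RX queues and the
 * remainder addresses the TX queues.
 */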
1640 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1641 {
1642         unsigned int i;
1643         int rc = 0;
1644
1645         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1646                 struct bnxt_tx_queue *txq;
1647                 struct bnxt_rx_queue *rxq;
1648                 struct bnxt_cp_ring_info *cpr;
1649
1650                 if (i >= bp->rx_cp_nr_rings) {
1651                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1652                         cpr = txq->cp_ring;
1653                 } else {
1654                         rxq = bp->rx_queues[i];
1655                         cpr = rxq->cp_ring;
1656                 }
1657
1658                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1659                 if (rc)
1660                         return rc;
1661         }
1662         return 0;
1663 }
1664
1665 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1666 {
1667         int rc;
1668         unsigned int i;
1669         struct bnxt_cp_ring_info *cpr;
1670
1671         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1672
1673                 if (i >= bp->rx_cp_nr_rings) {
1674                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1675                 } else {
1676                         cpr = bp->rx_queues[i]->cp_ring;
1677                         bp->grp_info[i].fw_stats_ctx = -1;
1678                 }
1679                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1680                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1681                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1682                         if (rc)
1683                                 return rc;
1684                 }
1685         }
1686         return 0;
1687 }
1688
1689 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1690 {
1691         unsigned int i;
1692         int rc = 0;
1693
1694         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1695                 struct bnxt_tx_queue *txq;
1696                 struct bnxt_rx_queue *rxq;
1697                 struct bnxt_cp_ring_info *cpr;
1698
1699                 if (i >= bp->rx_cp_nr_rings) {
1700                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1701                         cpr = txq->cp_ring;
1702                 } else {
1703                         rxq = bp->rx_queues[i];
1704                         cpr = rxq->cp_ring;
1705                 }
1706
1707                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1708
1709                 if (rc)
1710                         return rc;
1711         }
1712         return rc;
1713 }
1714
1715 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1716 {
1717         uint16_t idx;
1718         uint32_t rc = 0;
1719
1720         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1721
1722                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1723                         continue;
1724
1725                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1726
1727                 if (rc)
1728                         return rc;
1729         }
1730         return rc;
1731 }
1732
1733 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1734                                 unsigned int idx __rte_unused)
1735 {
1736         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1737
1738         bnxt_hwrm_ring_free(bp, cp_ring,
1739                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1740         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1741         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1742                         sizeof(*cpr->cp_desc_ring));
1743         cpr->cp_raw_cons = 0;
1744 }
1745
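/*
 * Free every ring that was allocated in hardware: each TX ring with its
 * completion ring, each RX ring with its aggregation and completion
 * rings, and finally the default completion ring.  The corresponding
 * descriptor memory is cleared and the producer/consumer indices reset
 * so the rings can be re-allocated later.
 */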
1746 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1747 {
1748         unsigned int i;
1749         int rc = 0;
1750
1751         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1752                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1753                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1754                 struct bnxt_ring *ring = txr->tx_ring_struct;
1755                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1756                 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
1757
1758                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1759                         bnxt_hwrm_ring_free(bp, ring,
1760                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1761                         ring->fw_ring_id = INVALID_HW_RING_ID;
1762                         memset(txr->tx_desc_ring, 0,
1763                                         txr->tx_ring_struct->ring_size *
1764                                         sizeof(*txr->tx_desc_ring));
1765                         memset(txr->tx_buf_ring, 0,
1766                                         txr->tx_ring_struct->ring_size *
1767                                         sizeof(*txr->tx_buf_ring));
1768                         txr->tx_prod = 0;
1769                         txr->tx_cons = 0;
1770                 }
1771                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1772                         bnxt_free_cp_ring(bp, cpr, idx);
1773                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1774                 }
1775         }
1776
1777         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1778                 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1779                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1780                 struct bnxt_ring *ring = rxr->rx_ring_struct;
1781                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1782                 unsigned int idx = i + 1;
1783
1784                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1785                         bnxt_hwrm_ring_free(bp, ring,
1786                                         HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1787                         ring->fw_ring_id = INVALID_HW_RING_ID;
1788                         bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1789                         memset(rxr->rx_desc_ring, 0,
1790                                         rxr->rx_ring_struct->ring_size *
1791                                         sizeof(*rxr->rx_desc_ring));
1792                         memset(rxr->rx_buf_ring, 0,
1793                                         rxr->rx_ring_struct->ring_size *
1794                                         sizeof(*rxr->rx_buf_ring));
1795                         rxr->rx_prod = 0;
1796                 }
1797                 ring = rxr->ag_ring_struct;
1798                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1799                         bnxt_hwrm_ring_free(bp, ring,
1800                                             HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1801                         ring->fw_ring_id = INVALID_HW_RING_ID;
1802                         memset(rxr->ag_buf_ring, 0,
1803                                rxr->ag_ring_struct->ring_size *
1804                                sizeof(*rxr->ag_buf_ring));
1805                         rxr->ag_prod = 0;
1806                         bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
1807                 }
1808                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1809                         bnxt_free_cp_ring(bp, cpr, idx);
1810                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1811                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1812                 }
1813         }
1814
1815         /* Default completion ring */
1816         {
1817                 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1818
1819                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1820                         bnxt_free_cp_ring(bp, cpr, 0);
1821                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1822                 }
1823         }
1824
1825         return rc;
1826 }
1827
1828 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1829 {
1830         uint16_t i;
1831         uint32_t rc = 0;
1832
1833         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1834                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1835                 if (rc)
1836                         return rc;
1837         }
1838         return rc;
1839 }
1840
1841 void bnxt_free_hwrm_resources(struct bnxt *bp)
1842 {
1843         /* Release buffers allocated via rte_malloc() */
1844         rte_free(bp->hwrm_cmd_resp_addr);
1845         rte_free(bp->hwrm_short_cmd_req_addr);
1846         bp->hwrm_cmd_resp_addr = NULL;
1847         bp->hwrm_short_cmd_req_addr = NULL;
1848         bp->hwrm_cmd_resp_dma_addr = 0;
1849         bp->hwrm_short_cmd_req_dma_addr = 0;
1850 }
1851
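/*
 * Allocate the DMA-able buffer used for HWRM command responses, resolve
 * its IO address for the firmware, and initialize the lock that
 * serializes HWRM commands.
 */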
1852 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1853 {
1854         struct rte_pci_device *pdev = bp->pdev;
1855         char type[RTE_MEMZONE_NAMESIZE];
1856
1857         sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1858                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1859         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1860         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1861         if (bp->hwrm_cmd_resp_addr == NULL)
1862                 return -ENOMEM;
1863         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1864         bp->hwrm_cmd_resp_dma_addr =
1865                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
1866         if (bp->hwrm_cmd_resp_dma_addr == 0) {
1867                 PMD_DRV_LOG(ERR,
1868                         "unable to map response address to physical memory\n");
1869                 return -ENOMEM;
1870         }
1871         rte_spinlock_init(&bp->hwrm_lock);
1872
1873         return 0;
1874 }
1875
1876 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1877 {
1878         struct bnxt_filter_info *filter;
1879         int rc = 0;
1880
1881         STAILQ_FOREACH(filter, &vnic->filter, next) {
1882                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1883                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1884                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1885                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1886                 else
1887                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1888                 /* Keep clearing the remaining filters on error;
1889                  * the last error code is returned to the caller. */
1890         }
1891         return rc;
1892 }
1893
1894 static int
1895 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1896 {
1897         struct bnxt_filter_info *filter;
1898         struct rte_flow *flow;
1899         int rc = 0;
1900
1901         STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1902                 filter = flow->filter;
1903                 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
1904                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1905                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1906                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1907                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1908                 else
1909                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1910
1911                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
1912                 rte_free(flow);
1913                 /* Keep clearing the remaining flows on error;
1914                  * the last error code is returned to the caller. */
1915         }
1916         return rc;
1917 }
1918
1919 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1920 {
1921         struct bnxt_filter_info *filter;
1922         int rc = 0;
1923
1924         STAILQ_FOREACH(filter, &vnic->filter, next) {
1925                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1926                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
1927                                                      filter);
1928                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1929                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
1930                                                          filter);
1931                 else
1932                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
1933                                                      filter);
1934                 if (rc)
1935                         break;
1936         }
1937         return rc;
1938 }
1939
1940 void bnxt_free_tunnel_ports(struct bnxt *bp)
1941 {
1942         if (bp->vxlan_port_cnt)
1943                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1944                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1945         bp->vxlan_port = 0;
1946         if (bp->geneve_port_cnt)
1947                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1948                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1949         bp->geneve_port = 0;
1950 }
1951
1952 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1953 {
1954         int i;
1955
1956         if (bp->vnic_info == NULL)
1957                 return;
1958
1959         /*
1960          * Cleanup VNICs in reverse order, to make sure the L2 filter
1961          * from vnic0 is last to be cleaned up.
1962          */
1963         for (i = bp->nr_vnics - 1; i >= 0; i--) {
1964                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1965
1966                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
1967
1968                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1969
1970                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1971
1972                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1973
1974                 bnxt_hwrm_vnic_free(bp, vnic);
1975         }
1976         /* Ring resources */
1977         bnxt_free_all_hwrm_rings(bp);
1978         bnxt_free_all_hwrm_ring_grps(bp);
1979         bnxt_free_all_hwrm_stat_ctxs(bp);
1980         bnxt_free_tunnel_ports(bp);
1981 }
1982
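/*
 * Helpers translating between the rte_eth ETH_LINK_SPEED_* configuration
 * bits and the HWRM PORT_PHY speed/duplex encodings.
 */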
1983 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1984 {
1985         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1986
1987         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1988                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1989
1990         switch (conf_link_speed) {
1991         case ETH_LINK_SPEED_10M_HD:
1992         case ETH_LINK_SPEED_100M_HD:
1993                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1994         }
1995         return hw_link_duplex;
1996 }
1997
1998 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
1999 {
2000         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2001 }
2002
2003 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2004 {
2005         uint16_t eth_link_speed = 0;
2006
2007         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2008                 return ETH_LINK_SPEED_AUTONEG;
2009
2010         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2011         case ETH_LINK_SPEED_100M:
2012         case ETH_LINK_SPEED_100M_HD:
2013                 eth_link_speed =
2014                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2015                 break;
2016         case ETH_LINK_SPEED_1G:
2017                 eth_link_speed =
2018                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2019                 break;
2020         case ETH_LINK_SPEED_2_5G:
2021                 eth_link_speed =
2022                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2023                 break;
2024         case ETH_LINK_SPEED_10G:
2025                 eth_link_speed =
2026                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2027                 break;
2028         case ETH_LINK_SPEED_20G:
2029                 eth_link_speed =
2030                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2031                 break;
2032         case ETH_LINK_SPEED_25G:
2033                 eth_link_speed =
2034                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2035                 break;
2036         case ETH_LINK_SPEED_40G:
2037                 eth_link_speed =
2038                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2039                 break;
2040         case ETH_LINK_SPEED_50G:
2041                 eth_link_speed =
2042                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2043                 break;
2044         case ETH_LINK_SPEED_100G:
2045                 eth_link_speed =
2046                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2047                 break;
2048         default:
2049                 PMD_DRV_LOG(ERR,
2050                         "Unsupported link speed %u; default to AUTO\n",
2051                         conf_link_speed);
2052                 break;
2053         }
2054         return eth_link_speed;
2055 }
2056
2057 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2058                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2059                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2060                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2061
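/*
 * Validate the link_speeds from the port configuration: a fixed-speed
 * request must name exactly one supported speed (enforced with a
 * power-of-two check), while an autoneg mask only needs to intersect
 * the supported set.
 */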
2062 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2063 {
2064         uint32_t one_speed;
2065
2066         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2067                 return 0;
2068
2069         if (link_speed & ETH_LINK_SPEED_FIXED) {
2070                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2071
2072                 if (one_speed & (one_speed - 1)) {
2073                         PMD_DRV_LOG(ERR,
2074                                 "Invalid advertised speeds (%u) for port %u\n",
2075                                 link_speed, port_id);
2076                         return -EINVAL;
2077                 }
2078                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2079                         PMD_DRV_LOG(ERR,
2080                                 "Unsupported advertised speed (%u) for port %u\n",
2081                                 link_speed, port_id);
2082                         return -EINVAL;
2083                 }
2084         } else {
2085                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2086                         PMD_DRV_LOG(ERR,
2087                                 "Unsupported advertised speeds (%u) for port %u\n",
2088                                 link_speed, port_id);
2089                         return -EINVAL;
2090                 }
2091         }
2092         return 0;
2093 }
2094
2095 static uint16_t
2096 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2097 {
2098         uint16_t ret = 0;
2099
2100         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2101                 if (bp->link_info.support_speeds)
2102                         return bp->link_info.support_speeds;
2103                 link_speed = BNXT_SUPPORTED_SPEEDS;
2104         }
2105
2106         if (link_speed & ETH_LINK_SPEED_100M)
2107                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2108         if (link_speed & ETH_LINK_SPEED_100M_HD)
2109                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2110         if (link_speed & ETH_LINK_SPEED_1G)
2111                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2112         if (link_speed & ETH_LINK_SPEED_2_5G)
2113                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2114         if (link_speed & ETH_LINK_SPEED_10G)
2115                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2116         if (link_speed & ETH_LINK_SPEED_20G)
2117                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2118         if (link_speed & ETH_LINK_SPEED_25G)
2119                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2120         if (link_speed & ETH_LINK_SPEED_40G)
2121                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2122         if (link_speed & ETH_LINK_SPEED_50G)
2123                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2124         if (link_speed & ETH_LINK_SPEED_100G)
2125                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2126         return ret;
2127 }
2128
2129 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2130 {
2131         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2132
2133         switch (hw_link_speed) {
2134         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2135                 eth_link_speed = ETH_SPEED_NUM_100M;
2136                 break;
2137         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2138                 eth_link_speed = ETH_SPEED_NUM_1G;
2139                 break;
2140         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2141                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2142                 break;
2143         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2144                 eth_link_speed = ETH_SPEED_NUM_10G;
2145                 break;
2146         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2147                 eth_link_speed = ETH_SPEED_NUM_20G;
2148                 break;
2149         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2150                 eth_link_speed = ETH_SPEED_NUM_25G;
2151                 break;
2152         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2153                 eth_link_speed = ETH_SPEED_NUM_40G;
2154                 break;
2155         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2156                 eth_link_speed = ETH_SPEED_NUM_50G;
2157                 break;
2158         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2159                 eth_link_speed = ETH_SPEED_NUM_100G;
2160                 break;
2161         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2162         default:
2163                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2164                         hw_link_speed);
2165                 break;
2166         }
2167         return eth_link_speed;
2168 }
2169
2170 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2171 {
2172         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2173
2174         switch (hw_link_duplex) {
2175         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2176         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2177                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2178                 break;
2179         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2180                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2181                 break;
2182         default:
2183                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2184                         hw_link_duplex);
2185                 break;
2186         }
2187         return eth_link_duplex;
2188 }
2189
2190 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2191 {
2192         int rc = 0;
2193         struct bnxt_link_info *link_info = &bp->link_info;
2194
2195         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2196         if (rc) {
2197                 PMD_DRV_LOG(ERR,
2198                         "Get link config failed with rc %d\n", rc);
2199                 goto exit;
2200         }
2201         if (link_info->link_speed)
2202                 link->link_speed =
2203                         bnxt_parse_hw_link_speed(link_info->link_speed);
2204         else
2205                 link->link_speed = ETH_SPEED_NUM_NONE;
2206         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2207         link->link_status = link_info->link_up;
2208         link->link_autoneg = link_info->auto_mode ==
2209                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2210                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2211 exit:
2212         return rc;
2213 }
2214
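/*
 * Apply the port link configuration.  Only a single-function PF may
 * change PHY settings.  Autonegotiation is requested when the
 * configuration allows it and the firmware has not pinned a speed;
 * otherwise a forced speed is used, except that BASE-T PHYs must
 * autonegotiate.  When bringing the link down, the speed and autoneg
 * selection below is skipped and only the link state is sent.
 */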
2215 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2216 {
2217         int rc = 0;
2218         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2219         struct bnxt_link_info link_req;
2220         uint16_t speed, autoneg;
2221
2222         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2223                 return 0;
2224
2225         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2226                         bp->eth_dev->data->port_id);
2227         if (rc)
2228                 goto error;
2229
2230         memset(&link_req, 0, sizeof(link_req));
2231         link_req.link_up = link_up;
2232         if (!link_up)
2233                 goto port_phy_cfg;
2234
2235         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2236         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2237         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2238         /* Autoneg can be done only when the FW allows */
2239         if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2240                                 bp->link_info.force_link_speed)) {
2241                 link_req.phy_flags |=
2242                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2243                 link_req.auto_link_speed_mask =
2244                         bnxt_parse_eth_link_speed_mask(bp,
2245                                                        dev_conf->link_speeds);
2246         } else {
2247                 if (bp->link_info.phy_type ==
2248                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2249                     bp->link_info.phy_type ==
2250                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2251                     bp->link_info.media_type ==
2252                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2253                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2254                         return -EINVAL;
2255                 }
2256
2257                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2258                 /* If user wants a particular speed try that first. */
2259                 if (speed)
2260                         link_req.link_speed = speed;
2261                 else if (bp->link_info.force_link_speed)
2262                         link_req.link_speed = bp->link_info.force_link_speed;
2263                 else
2264                         link_req.link_speed = bp->link_info.auto_link_speed;
2265         }
2266         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2267         link_req.auto_pause = bp->link_info.auto_pause;
2268         link_req.force_pause = bp->link_info.force_pause;
2269
2270 port_phy_cfg:
2271         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2272         if (rc) {
2273                 PMD_DRV_LOG(ERR,
2274                         "Set link config failed with rc %d\n", rc);
2275         }
2276
2277 error:
2278         return rc;
2279 }
2280
2281 /* JIRA 22088 */
2282 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2283 {
2284         struct hwrm_func_qcfg_input req = {0};
2285         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2286         uint16_t flags;
2287         int rc = 0;
2288
2289         HWRM_PREP(req, FUNC_QCFG);
2290         req.fid = rte_cpu_to_le_16(0xffff);
2291
2292         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2293
2294         HWRM_CHECK_RESULT();
2295
2296         /* Hard-coded 12-bit (0xfff) VLAN ID mask */
2297         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2298         flags = rte_le_to_cpu_16(resp->flags);
2299         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2300                 bp->flags |= BNXT_FLAG_MULTI_HOST;
2301
2302         switch (resp->port_partition_type) {
2303         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2304         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2305         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2306                 bp->port_partition_type = resp->port_partition_type;
2307                 break;
2308         default:
2309                 bp->port_partition_type = 0;
2310                 break;
2311         }
2312
2313         HWRM_UNLOCK();
2314
2315         return rc;
2316 }
2317
2318 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2319                                    struct hwrm_func_qcaps_output *qcaps)
2320 {
2321         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2322         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2323                sizeof(qcaps->mac_address));
2324         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2325         qcaps->max_rx_rings = fcfg->num_rx_rings;
2326         qcaps->max_tx_rings = fcfg->num_tx_rings;
2327         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2328         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2329         qcaps->max_vfs = 0;
2330         qcaps->first_vf_id = 0;
2331         qcaps->max_vnics = fcfg->num_vnics;
2332         qcaps->max_decap_records = 0;
2333         qcaps->max_encap_records = 0;
2334         qcaps->max_tx_wm_flows = 0;
2335         qcaps->max_tx_em_flows = 0;
2336         qcaps->max_rx_wm_flows = 0;
2337         qcaps->max_rx_em_flows = 0;
2338         qcaps->max_flow_id = 0;
2339         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2340         qcaps->max_sp_tx_rings = 0;
2341         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2342 }
2343
2344 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2345 {
2346         struct hwrm_func_cfg_input req = {0};
2347         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2348         int rc;
2349
2350         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2351                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2352                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2353                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2354                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2355                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2356                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2357                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2358                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2359                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2360         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2361         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2362         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2363                                    ETHER_CRC_LEN + VLAN_TAG_SIZE *
2364                                    BNXT_NUM_VLANS);
2365         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2366         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2367         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2368         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2369         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2370         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2371         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2372         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2373         req.fid = rte_cpu_to_le_16(0xffff);
2374
2375         HWRM_PREP(req, FUNC_CFG);
2376
2377         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2378
2379         HWRM_CHECK_RESULT();
2380         HWRM_UNLOCK();
2381
2382         return rc;
2383 }
2384
2385 static void populate_vf_func_cfg_req(struct bnxt *bp,
2386                                      struct hwrm_func_cfg_input *req,
2387                                      int num_vfs)
2388 {
2389         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2390                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2391                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2392                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2393                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2394                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2395                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2396                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2397                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2398                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2399
2400         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2401                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2402                                     BNXT_NUM_VLANS);
2403         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2404                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2405                                     BNXT_NUM_VLANS);
2406         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2407                                                 (num_vfs + 1));
2408         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2409         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2410                                                (num_vfs + 1));
2411         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2412         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2413         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2414         /* TODO: For now, do not support VMDq/RFS on VFs. */
2415         req->num_vnics = rte_cpu_to_le_16(1);
2416         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2417                                                  (num_vfs + 1));
2418 }
2419
2420 static void add_random_mac_if_needed(struct bnxt *bp,
2421                                      struct hwrm_func_cfg_input *cfg_req,
2422                                      int vf)
2423 {
2424         struct ether_addr mac;
2425
2426         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2427                 return;
2428
2429         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2430                 cfg_req->enables |=
2431                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2432                 eth_random_addr(cfg_req->dflt_mac_addr);
2433                 bp->pf.vf_info[vf].random_mac = true;
2434         } else {
2435                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2436         }
2437 }
2438
2439 static void reserve_resources_from_vf(struct bnxt *bp,
2440                                       struct hwrm_func_cfg_input *cfg_req,
2441                                       int vf)
2442 {
2443         struct hwrm_func_qcaps_input req = {0};
2444         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2445         int rc;
2446
2447         /* Get the actual allocated values now */
2448         HWRM_PREP(req, FUNC_QCAPS);
2449         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2450         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2451
2452         if (rc) {
2453                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2454                 copy_func_cfg_to_qcaps(cfg_req, resp);
2455         } else if (resp->error_code) {
2456                 rc = rte_le_to_cpu_16(resp->error_code);
2457                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2458                 copy_func_cfg_to_qcaps(cfg_req, resp);
2459         }
2460
2461         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2462         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2463         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2464         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2465         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2466         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2467         /*
2468          * TODO: max_vnics is always forced to 1 while VMDq is not
2469          * supported on VFs, so it is not subtracted here.
2470          */
2471         //bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2472         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2473
2474         HWRM_UNLOCK();
2475 }
2476
2477 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2478 {
2479         struct hwrm_func_qcfg_input req = {0};
2480         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2481         int rc;
2482
2483         /* Check for zero MAC address */
2484         HWRM_PREP(req, FUNC_QCFG);
2485         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2486         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2487         if (rc) {
2488                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
2489                 return -1;
2490         } else if (resp->error_code) {
2491                 rc = rte_le_to_cpu_16(resp->error_code);
2492                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
2493                 return -1;
2494         }
2495         rc = rte_le_to_cpu_16(resp->vlan);
2496
2497         HWRM_UNLOCK();
2498
2499         return rc;
2500 }
2501
2502 static int update_pf_resource_max(struct bnxt *bp)
2503 {
2504         struct hwrm_func_qcfg_input req = {0};
2505         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2506         int rc;
2507
2508         /* And copy the allocated numbers into the pf struct */
2509         HWRM_PREP(req, FUNC_QCFG);
2510         req.fid = rte_cpu_to_le_16(0xffff);
2511         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2512         HWRM_CHECK_RESULT();
2513
2514         /* Only TX ring value reflects actual allocation? TODO */
2515         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2516         bp->pf.evb_mode = resp->evb_mode;
2517
2518         HWRM_UNLOCK();
2519
2520         return rc;
2521 }
2522
2523 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2524 {
2525         int rc;
2526
2527         if (!BNXT_PF(bp)) {
2528                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2529                 return -1;
2530         }
2531
2532         rc = bnxt_hwrm_func_qcaps(bp);
2533         if (rc)
2534                 return rc;
2535
2536         bp->pf.func_cfg_flags &=
2537                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2538                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2539         bp->pf.func_cfg_flags |=
2540                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2541         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2542         return rc;
2543 }
2544
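/*
 * Bring up SR-IOV VFs.  The sequence, visible below: shrink the PF to a
 * single TX ring so rings remain for the VFs, register a buffer into
 * which the firmware forwards VF HWRM requests, configure each VF with
 * an even 1/(num_vfs + 1) share of the function resources (assigning a
 * random MAC where none is set), subtract each VF's actual allocation
 * from the PF maxima, and finally reconfigure the PF with the remaining
 * resources.
 */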
2545 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2546 {
2547         struct hwrm_func_cfg_input req = {0};
2548         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2549         int i;
2550         size_t sz;
2551         int rc = 0;
2552         size_t req_buf_sz;
2553
2554         if (!BNXT_PF(bp)) {
2555                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2556                 return -1;
2557         }
2558
2559         rc = bnxt_hwrm_func_qcaps(bp);
2560
2561         if (rc)
2562                 return rc;
2563
2564         bp->pf.active_vfs = num_vfs;
2565
2566         /*
2567          * First, configure the PF to only use one TX ring.  This ensures that
2568          * there are enough rings for all VFs.
2569          *
2570          * If we don't do this, when we call func_alloc() later, we will lock
2571          * extra rings to the PF that won't be available during func_cfg() of
2572          * the VFs.
2573          *
2574          * This has been fixed with firmware versions above 20.6.54
2575          */
2576         bp->pf.func_cfg_flags &=
2577                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2578                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2579         bp->pf.func_cfg_flags |=
2580                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2581         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2582         if (rc)
2583                 return rc;
2584
2585         /*
2586          * Now, create and register a buffer to hold forwarded VF requests
2587          */
2588         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2589         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2590                 page_roundup(req_buf_sz));
2591         if (bp->pf.vf_req_buf == NULL) {
2592                 rc = -ENOMEM;
2593                 goto error_free;
2594         }
2595         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2596                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2597         for (i = 0; i < num_vfs; i++)
2598                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2599                                         (i * HWRM_MAX_REQ_LEN);
2600
2601         rc = bnxt_hwrm_func_buf_rgtr(bp);
2602         if (rc)
2603                 goto error_free;
2604
2605         populate_vf_func_cfg_req(bp, &req, num_vfs);
2606
2607         bp->pf.active_vfs = 0;
2608         for (i = 0; i < num_vfs; i++) {
2609                 add_random_mac_if_needed(bp, &req, i);
2610
2611                 HWRM_PREP(req, FUNC_CFG);
2612                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2613                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2614                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2615
2616                 /* Clear enable flag for next pass */
2617                 req.enables &= ~rte_cpu_to_le_32(
2618                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2619
2620                 if (rc || resp->error_code) {
2621                         PMD_DRV_LOG(ERR,
2622                                 "Failed to initialize VF %d\n", i);
2623                         PMD_DRV_LOG(ERR,
2624                                 "Not all VFs available. (%d, %d)\n",
2625                                 rc, resp->error_code);
2626                         HWRM_UNLOCK();
2627                         break;
2628                 }
2629
2630                 HWRM_UNLOCK();
2631
2632                 reserve_resources_from_vf(bp, &req, i);
2633                 bp->pf.active_vfs++;
2634                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2635         }
2636
2637         /*
2638          * Now configure the PF to use "the rest" of the resources
2639          * We're using STD_TX_RING_MODE here though which will limit the TX
2640          * rings.  This will allow QoS to function properly.  Not setting this
2641          * will cause PF rings to break bandwidth settings.
2642          */
2643         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2644         if (rc)
2645                 goto error_free;
2646
2647         rc = update_pf_resource_max(bp);
2648         if (rc)
2649                 goto error_free;
2650
2651         return rc;
2652
2653 error_free:
2654         bnxt_hwrm_func_buf_unrgtr(bp);
2655         return rc;
2656 }
2657
2658 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2659 {
2660         struct hwrm_func_cfg_input req = {0};
2661         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2662         int rc;
2663
2664         HWRM_PREP(req, FUNC_CFG);
2665
2666         req.fid = rte_cpu_to_le_16(0xffff);
2667         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2668         req.evb_mode = bp->pf.evb_mode;
2669
2670         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2671         HWRM_CHECK_RESULT();
2672         HWRM_UNLOCK();
2673
2674         return rc;
2675 }
2676
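/*
 * Allocate a tunnel (VXLAN or Geneve) UDP destination port in the
 * firmware.  The returned firmware port ID is cached so the port can be
 * released later via bnxt_hwrm_tunnel_dst_port_free().
 */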
2677 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2678                                 uint8_t tunnel_type)
2679 {
2680         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2681         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2682         int rc = 0;
2683
2684         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2685         req.tunnel_type = tunnel_type;
2686         req.tunnel_dst_port_val = port;
2687         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2688         HWRM_CHECK_RESULT();
2689
2690         switch (tunnel_type) {
2691         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2692                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2693                 bp->vxlan_port = port;
2694                 break;
2695         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2696                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2697                 bp->geneve_port = port;
2698                 break;
2699         default:
2700                 break;
2701         }
2702
2703         HWRM_UNLOCK();
2704
2705         return rc;
2706 }
2707
2708 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2709                                 uint8_t tunnel_type)
2710 {
2711         struct hwrm_tunnel_dst_port_free_input req = {0};
2712         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2713         int rc = 0;
2714
2715         HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2716
2717         req.tunnel_type = tunnel_type;
2718         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2719         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2720
2721         HWRM_CHECK_RESULT();
2722         HWRM_UNLOCK();
2723
2724         return rc;
2725 }
2726
2727 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2728                                         uint32_t flags)
2729 {
2730         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2731         struct hwrm_func_cfg_input req = {0};
2732         int rc;
2733
2734         HWRM_PREP(req, FUNC_CFG);
2735
2736         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2737         req.flags = rte_cpu_to_le_32(flags);
2738         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2739
2740         HWRM_CHECK_RESULT();
2741         HWRM_UNLOCK();
2742
2743         return rc;
2744 }
2745
2746 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2747 {
2748         uint32_t *flag = flagp;
2749
2750         vnic->flags = *flag;
2751 }
2752
2753 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2754 {
2755         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2756 }
2757
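/*
 * Register the VF request forwarding buffer (allocated in
 * bnxt_hwrm_allocate_vfs()) with the firmware so that HWRM commands
 * issued by VFs can be forwarded to the PF driver.
 */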
2758 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2759 {
2760         int rc = 0;
2761         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2762         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2763
2764         HWRM_PREP(req, FUNC_BUF_RGTR);
2765
2766         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2767         req.req_buf_page_size = rte_cpu_to_le_16(
2768                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2769         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2770         req.req_buf_page_addr[0] =
2771                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2772         if (req.req_buf_page_addr[0] == 0) {
2773                 PMD_DRV_LOG(ERR,
2774                         "unable to map buffer address to physical memory\n");
2775                 return -ENOMEM;
2776         }
2777
2778         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2779
2780         HWRM_CHECK_RESULT();
2781         HWRM_UNLOCK();
2782
2783         return rc;
2784 }
2785
2786 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2787 {
2788         int rc = 0;
2789         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2790         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2791
2792         HWRM_PREP(req, FUNC_BUF_UNRGTR);
2793
2794         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2795
2796         HWRM_CHECK_RESULT();
2797         HWRM_UNLOCK();
2798
2799         return rc;
2800 }
2801
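/*
 * Direct firmware async event notifications for the PF to the default
 * completion ring.  bnxt_hwrm_vf_func_cfg_def_cp() below does the same
 * for a VF using HWRM_FUNC_VF_CFG.
 */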
2802 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2803 {
2804         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2805         struct hwrm_func_cfg_input req = {0};
2806         int rc;
2807
2808         HWRM_PREP(req, FUNC_CFG);
2809
2810         req.fid = rte_cpu_to_le_16(0xffff);
2811         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2812         req.enables = rte_cpu_to_le_32(
2813                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2814         req.async_event_cr = rte_cpu_to_le_16(
2815                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2816         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2817
2818         HWRM_CHECK_RESULT();
2819         HWRM_UNLOCK();
2820
2821         return rc;
2822 }
2823
2824 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2825 {
2826         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2827         struct hwrm_func_vf_cfg_input req = {0};
2828         int rc;
2829
2830         HWRM_PREP(req, FUNC_VF_CFG);
2831
2832         req.enables = rte_cpu_to_le_32(
2833                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2834         req.async_event_cr = rte_cpu_to_le_16(
2835                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2836         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2837
2838         HWRM_CHECK_RESULT();
2839         HWRM_UNLOCK();
2840
2841         return rc;
2842 }
2843
2844 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2845 {
2846         struct hwrm_func_cfg_input req = {0};
2847         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2848         uint16_t dflt_vlan, fid;
2849         uint32_t func_cfg_flags;
2850         int rc = 0;
2851
2852         HWRM_PREP(req, FUNC_CFG);
2853
2854         if (is_vf) {
2855                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2856                 fid = bp->pf.vf_info[vf].fid;
2857                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2858         } else {
                fid = 0xffff;
2860                 func_cfg_flags = bp->pf.func_cfg_flags;
2861                 dflt_vlan = bp->vlan;
2862         }
2863
2864         req.flags = rte_cpu_to_le_32(func_cfg_flags);
2865         req.fid = rte_cpu_to_le_16(fid);
2866         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2867         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2868
2869         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2870
2871         HWRM_CHECK_RESULT();
2872         HWRM_UNLOCK();
2873
2874         return rc;
2875 }
2876
2877 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2878                         uint16_t max_bw, uint16_t enables)
2879 {
2880         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2881         struct hwrm_func_cfg_input req = {0};
2882         int rc;
2883
2884         HWRM_PREP(req, FUNC_CFG);
2885
2886         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2887         req.enables |= rte_cpu_to_le_32(enables);
2888         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2889         req.max_bw = rte_cpu_to_le_32(max_bw);
2890         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2891
2892         HWRM_CHECK_RESULT();
2893         HWRM_UNLOCK();
2894
2895         return rc;
2896 }
2897
2898 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2899 {
2900         struct hwrm_func_cfg_input req = {0};
2901         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2902         int rc = 0;
2903
2904         HWRM_PREP(req, FUNC_CFG);
2905
2906         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2907         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2908         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2909         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2910
2911         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2912
2913         HWRM_CHECK_RESULT();
2914         HWRM_UNLOCK();
2915
2916         return rc;
2917 }
2918
2919 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2920                               void *encaped, size_t ec_size)
2921 {
2922         int rc = 0;
2923         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2924         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2925
2926         if (ec_size > sizeof(req.encap_request))
2927                 return -1;
2928
2929         HWRM_PREP(req, REJECT_FWD_RESP);
2930
2931         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2932         memcpy(req.encap_request, encaped, ec_size);
2933
2934         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2935
2936         HWRM_CHECK_RESULT();
2937         HWRM_UNLOCK();
2938
2939         return rc;
2940 }
2941
2942 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2943                                        struct ether_addr *mac)
2944 {
2945         struct hwrm_func_qcfg_input req = {0};
2946         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2947         int rc;
2948
2949         HWRM_PREP(req, FUNC_QCFG);
2950
2951         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2952         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2953
2954         HWRM_CHECK_RESULT();
2955
2956         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2957
2958         HWRM_UNLOCK();
2959
2960         return rc;
2961 }
2962
2963 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2964                             void *encaped, size_t ec_size)
2965 {
2966         int rc = 0;
2967         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2968         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2969
2970         if (ec_size > sizeof(req.encap_request))
2971                 return -1;
2972
2973         HWRM_PREP(req, EXEC_FWD_RESP);
2974
2975         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2976         memcpy(req.encap_request, encaped, ec_size);
2977
2978         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2979
2980         HWRM_CHECK_RESULT();
2981         HWRM_UNLOCK();
2982
2983         return rc;
2984 }
2985
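/*
 * Query one statistics context and fold its counters into the per-queue
 * rte_eth_stats fields: with rx set, cid names an Rx stat context and
 * feeds q_ipackets/q_ibytes/q_errors[idx]; otherwise it feeds the Tx
 * counterparts.
 */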
2986 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2987                          struct rte_eth_stats *stats, uint8_t rx)
2988 {
2989         int rc = 0;
2990         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
2991         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
2992
2993         HWRM_PREP(req, STAT_CTX_QUERY);
2994
2995         req.stat_ctx_id = rte_cpu_to_le_32(cid);
2996
2997         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2998
2999         HWRM_CHECK_RESULT();
3000
3001         if (rx) {
3002                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3003                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3004                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3005                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3006                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3007                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3008                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3009                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3010         } else {
3011                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3012                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3013                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3014                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3015                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3016                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3017                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
3018         }
3019
3021         HWRM_UNLOCK();
3022
3023         return rc;
3024 }
3025
3026 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3027 {
3028         struct hwrm_port_qstats_input req = {0};
3029         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3030         struct bnxt_pf_info *pf = &bp->pf;
3031         int rc;
3032
3033         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3034                 return 0;
3035
3036         HWRM_PREP(req, PORT_QSTATS);
3037
3038         req.port_id = rte_cpu_to_le_16(pf->port_id);
3039         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3040         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3041         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3042
3043         HWRM_CHECK_RESULT();
3044         HWRM_UNLOCK();
3045
3046         return rc;
3047 }
3048
3049 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3050 {
3051         struct hwrm_port_clr_stats_input req = {0};
3052         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3053         struct bnxt_pf_info *pf = &bp->pf;
3054         int rc;
3055
3056         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3057                 return 0;
3058
3059         HWRM_PREP(req, PORT_CLR_STATS);
3060
3061         req.port_id = rte_cpu_to_le_16(pf->port_id);
3062         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3063
3064         HWRM_CHECK_RESULT();
3065         HWRM_UNLOCK();
3066
3067         return rc;
3068 }
3069
3070 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3071 {
3072         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3073         struct hwrm_port_led_qcaps_input req = {0};
3074         int rc;
3075
3076         if (BNXT_VF(bp))
3077                 return 0;
3078
3079         HWRM_PREP(req, PORT_LED_QCAPS);
3080         req.port_id = bp->pf.port_id;
3081         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3082
3083         HWRM_CHECK_RESULT();
3084
3085         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3086                 unsigned int i;
3087
3088                 bp->num_leds = resp->num_leds;
3089                 memcpy(bp->leds, &resp->led0_id,
3090                         sizeof(bp->leds[0]) * bp->num_leds);
3091                 for (i = 0; i < bp->num_leds; i++) {
                        struct bnxt_led_info *led = &bp->leds[i];
                        uint16_t caps = led->led_state_caps;

3096                         if (!led->led_group_id ||
3097                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3098                                 bp->num_leds = 0;
3099                                 break;
3100                         }
3101                 }
3102         }
3103
3104         HWRM_UNLOCK();
3105
3106         return rc;
3107 }
3108
3109 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3110 {
3111         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3112         struct hwrm_port_led_cfg_input req = {0};
3113         struct bnxt_led_cfg *led_cfg;
3114         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3115         uint16_t duration = 0;
3116         int rc, i;
3117
3118         if (!bp->num_leds || BNXT_VF(bp))
3119                 return -EOPNOTSUPP;
3120
3121         HWRM_PREP(req, PORT_LED_CFG);
3122
3123         if (led_on) {
3124                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3125                 duration = rte_cpu_to_le_16(500);
3126         }
3127         req.port_id = bp->pf.port_id;
3128         req.num_leds = bp->num_leds;
3129         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3130         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3131                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3132                 led_cfg->led_id = bp->leds[i].led_id;
3133                 led_cfg->led_state = led_state;
3134                 led_cfg->led_blink_on = duration;
3135                 led_cfg->led_blink_off = duration;
3136                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3137         }
3138
3139         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3140
3141         HWRM_CHECK_RESULT();
3142         HWRM_UNLOCK();
3143
3144         return rc;
3145 }
3146
3147 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3148                                uint32_t *length)
3149 {
3150         int rc;
3151         struct hwrm_nvm_get_dir_info_input req = {0};
3152         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3153
3154         HWRM_PREP(req, NVM_GET_DIR_INFO);
3155
3156         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3157
3158         HWRM_CHECK_RESULT();
3159         HWRM_UNLOCK();
3160
3161         if (!rc) {
3162                 *entries = rte_le_to_cpu_32(resp->entries);
3163                 *length = rte_le_to_cpu_32(resp->entry_length);
3164         }
3165         return rc;
3166 }
3167
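/*
 * Copy the NVM directory into "data".  The first two bytes carry the
 * directory entry count and entry length (note both are truncated to
 * 8 bits by the uint8_t buffer), followed by the raw directory entries
 * DMA'd from the firmware.
 */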
3168 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3169 {
3170         int rc;
3171         uint32_t dir_entries;
3172         uint32_t entry_length;
3173         uint8_t *buf;
3174         size_t buflen;
3175         rte_iova_t dma_handle;
3176         struct hwrm_nvm_get_dir_entries_input req = {0};
3177         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3178
3179         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3180         if (rc != 0)
3181                 return rc;
3182
3183         *data++ = dir_entries;
3184         *data++ = entry_length;
3185         len -= 2;
3186         memset(data, 0xff, len);
3187
        buflen = dir_entries * entry_length;
        buf = rte_malloc("nvm_dir", buflen, 0);
        if (buf == NULL)
                return -ENOMEM;
        rte_mem_lock_page(buf);
        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                rte_free(buf);
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                return -ENOMEM;
        }
3199         HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
3200         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3201         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3202
3203         HWRM_CHECK_RESULT();
3204         HWRM_UNLOCK();
3205
3206         if (rc == 0)
3207                 memcpy(data, buf, len > buflen ? buflen : len);
3208
3209         rte_free(buf);
3210
3211         return rc;
3212 }
3213
3214 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3215                              uint32_t offset, uint32_t length,
3216                              uint8_t *data)
3217 {
3218         int rc;
3219         uint8_t *buf;
3220         rte_iova_t dma_handle;
3221         struct hwrm_nvm_read_input req = {0};
3222         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3223
        buf = rte_malloc("nvm_item", length, 0);
        if (!buf)
                return -ENOMEM;
        rte_mem_lock_page(buf);

        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                rte_free(buf);
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                return -ENOMEM;
        }
3235         HWRM_PREP(req, NVM_READ);
3236         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3237         req.dir_idx = rte_cpu_to_le_16(index);
3238         req.offset = rte_cpu_to_le_32(offset);
3239         req.len = rte_cpu_to_le_32(length);
3240         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3241         HWRM_CHECK_RESULT();
3242         HWRM_UNLOCK();
3243         if (rc == 0)
3244                 memcpy(data, buf, length);
3245
3246         rte_free(buf);
3247         return rc;
3248 }
3249
3250 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3251 {
3252         int rc;
3253         struct hwrm_nvm_erase_dir_entry_input req = {0};
3254         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3255
3256         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
3257         req.dir_idx = rte_cpu_to_le_16(index);
3258         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3259         HWRM_CHECK_RESULT();
3260         HWRM_UNLOCK();
3261
3262         return rc;
3263 }
3264
3266 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3267                           uint16_t dir_ordinal, uint16_t dir_ext,
3268                           uint16_t dir_attr, const uint8_t *data,
3269                           size_t data_len)
3270 {
3271         int rc;
3272         struct hwrm_nvm_write_input req = {0};
3273         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3274         rte_iova_t dma_handle;
3275         uint8_t *buf;
3276
        /*
         * Set up the DMA source buffer before taking the HWRM lock so
         * that the error paths below cannot return with the lock held.
         */
        buf = rte_malloc("nvm_write", data_len, 0);
        if (!buf)
                return -ENOMEM;
        rte_mem_lock_page(buf);

        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                rte_free(buf);
                PMD_DRV_LOG(ERR,
                        "unable to map source address to physical memory\n");
                return -ENOMEM;
        }
        memcpy(buf, data, data_len);

        HWRM_PREP(req, NVM_WRITE);

        req.dir_type = rte_cpu_to_le_16(dir_type);
        req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
        req.dir_ext = rte_cpu_to_le_16(dir_ext);
        req.dir_attr = rte_cpu_to_le_16(dir_attr);
        req.dir_data_length = rte_cpu_to_le_32(data_len);
        req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3298
3299         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3300
3301         HWRM_CHECK_RESULT();
3302         HWRM_UNLOCK();
3303
3304         rte_free(buf);
3305         return rc;
3306 }
3307
3308 static void
3309 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3310 {
3311         uint32_t *count = cbdata;
3312
3313         *count = *count + 1;
3314 }
3315
3316 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3317                                      struct bnxt_vnic_info *vnic __rte_unused)
3318 {
3319         return 0;
3320 }
3321
3322 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3323 {
3324         uint32_t count = 0;
3325
3326         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3327             &count, bnxt_vnic_count_hwrm_stub);
3328
3329         return count;
3330 }
3331
3332 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3333                                         uint16_t *vnic_ids)
3334 {
3335         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3336         struct hwrm_func_vf_vnic_ids_query_output *resp =
3337                                                 bp->hwrm_cmd_resp_addr;
3338         int rc;
3339
3340         /* First query all VNIC ids */
3341         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
3342
3343         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3344         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3345         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3346
3347         if (req.vnic_id_tbl_addr == 0) {
3348                 HWRM_UNLOCK();
3349                 PMD_DRV_LOG(ERR,
3350                 "unable to map VNIC ID table address to physical memory\n");
3351                 return -ENOMEM;
3352         }
3353         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3354         if (rc) {
3355                 HWRM_UNLOCK();
3356                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3357                 return -1;
3358         } else if (resp->error_code) {
3359                 rc = rte_le_to_cpu_16(resp->error_code);
3360                 HWRM_UNLOCK();
3361                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
3362                 return -1;
3363         }
3364         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3365
3366         HWRM_UNLOCK();
3367
3368         return rc;
3369 }
3370
/*
 * Query the VNIC IDs for the specified VF, then invoke vnic_cb on each
 * VNIC to update the necessary vnic_info fields using cbdata, and
 * finally invoke hwrm_cb to program the new VNIC configuration into the
 * firmware.  An illustrative usage sketch follows the function body.
 */
3376 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3377         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3378         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3379 {
3380         struct bnxt_vnic_info vnic;
3381         int rc = 0;
3382         int i, num_vnic_ids;
3383         uint16_t *vnic_ids;
3384         size_t vnic_id_sz;
3385         size_t sz;
3386
3387         /* First query all VNIC ids */
3388         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3389         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3390                         RTE_CACHE_LINE_SIZE);
3391         if (vnic_ids == NULL) {
3392                 rc = -ENOMEM;
3393                 return rc;
3394         }
3395         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3396                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3397
3398         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3399
        if (num_vnic_ids < 0) {
                rte_free(vnic_ids);
                return num_vnic_ids;
        }
3402
        /* Retrieve each VNIC, apply the callback, then program the update */
3404
3405         for (i = 0; i < num_vnic_ids; i++) {
3406                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3407                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3408                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3409                 if (rc)
3410                         break;
3411                 if (vnic.mru <= 4)      /* Indicates unallocated */
3412                         continue;
3413
3414                 vnic_cb(&vnic, cbdata);
3415
3416                 rc = hwrm_cb(bp, &vnic);
3417                 if (rc)
3418                         break;
3419         }
3420
3421         rte_free(vnic_ids);
3422
3423         return rc;
3424 }
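
/*
 * Illustrative sketch only (not part of the driver): clear the
 * promiscuous/allmulti Rx mask on every VNIC of VF 0 by combining the
 * helpers above.  vf_vnic_set_rxmask_cb() stores the new flags in each
 * VNIC and bnxt_set_rx_mask_no_vlan() programs them.
 */
static inline int bnxt_example_clear_vf0_rxmask(struct bnxt *bp)
{
        uint32_t flags = 0;     /* no promiscuous/allmulti bits set */

        return bnxt_hwrm_func_vf_vnic_query_and_config(bp, 0,
                        vf_vnic_set_rxmask_cb, &flags,
                        bnxt_set_rx_mask_no_vlan);
}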
3425
3426 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3427                                               bool on)
3428 {
3429         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3430         struct hwrm_func_cfg_input req = {0};
3431         int rc;
3432
3433         HWRM_PREP(req, FUNC_CFG);
3434
3435         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3436         req.enables |= rte_cpu_to_le_32(
3437                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3438         req.vlan_antispoof_mode = on ?
3439                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3440                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3441         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3442
3443         HWRM_CHECK_RESULT();
3444         HWRM_UNLOCK();
3445
3446         return rc;
3447 }
3448
3449 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3450 {
3451         struct bnxt_vnic_info vnic;
3452         uint16_t *vnic_ids;
3453         size_t vnic_id_sz;
3454         int num_vnic_ids, i;
3455         size_t sz;
3456         int rc;
3457
3458         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3459         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3460                         RTE_CACHE_LINE_SIZE);
3461         if (vnic_ids == NULL) {
3462                 rc = -ENOMEM;
3463                 return rc;
3464         }
3465
3466         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3467                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3468
3469         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3470         if (rc <= 0)
3471                 goto exit;
3472         num_vnic_ids = rc;
3473
3474         /*
3475          * Loop through to find the default VNIC ID.
3476          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3477          * by sending the hwrm_func_qcfg command to the firmware.
3478          */
3479         for (i = 0; i < num_vnic_ids; i++) {
3480                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3481                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3482                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3483                                         bp->pf.first_vf_id + vf);
3484                 if (rc)
3485                         goto exit;
3486                 if (vnic.func_default) {
3487                         rte_free(vnic_ids);
3488                         return vnic.fw_vnic_id;
3489                 }
3490         }
3491         /* Could not find a default VNIC. */
3492         PMD_DRV_LOG(ERR, "No default VNIC\n");
3493 exit:
3494         rte_free(vnic_ids);
3495         return -1;
3496 }
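
/*
 * Sketch of the TODO above, illustrative only: fetch the default VNIC
 * ID directly with HWRM_FUNC_QCFG instead of walking the VNIC table.
 * Assumes this HSI revision exposes dflt_vnic_id in
 * hwrm_func_qcfg_output.
 */
static inline int bnxt_example_qcfg_dflt_vnic_id(struct bnxt *bp, int vf)
{
        struct hwrm_func_qcfg_input req = {0};
        struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        HWRM_PREP(req, FUNC_QCFG);
        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        rc = rte_le_to_cpu_16(resp->dflt_vnic_id);
        HWRM_UNLOCK();

        return rc;
}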
3497
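/*
 * Program an exact-match (EM) flow.  Note the EM key fields (ethertype,
 * IP addresses, L4 ports) are converted to big-endian wire order below,
 * unlike ordinary little-endian HWRM request fields.
 */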
3498 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3499                          uint16_t dst_id,
3500                          struct bnxt_filter_info *filter)
3501 {
3502         int rc = 0;
3503         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3504         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3505         uint32_t enables = 0;
3506
3507         if (filter->fw_em_filter_id != UINT64_MAX)
3508                 bnxt_hwrm_clear_em_filter(bp, filter);
3509
3510         HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
3511
3512         req.flags = rte_cpu_to_le_32(filter->flags);
3513
3514         enables = filter->enables |
3515               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3516         req.dst_id = rte_cpu_to_le_16(dst_id);
3517
3518         if (filter->ip_addr_type) {
3519                 req.ip_addr_type = filter->ip_addr_type;
3520                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3521         }
3522         if (enables &
3523             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3524                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3525         if (enables &
3526             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3527                 memcpy(req.src_macaddr, filter->src_macaddr,
3528                        ETHER_ADDR_LEN);
3529         if (enables &
3530             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3531                 memcpy(req.dst_macaddr, filter->dst_macaddr,
3532                        ETHER_ADDR_LEN);
3533         if (enables &
3534             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3535                 req.ovlan_vid = filter->l2_ovlan;
3536         if (enables &
3537             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3538                 req.ivlan_vid = filter->l2_ivlan;
3539         if (enables &
3540             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3541                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3542         if (enables &
3543             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3544                 req.ip_protocol = filter->ip_protocol;
3545         if (enables &
3546             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3547                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3548         if (enables &
3549             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3550                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3551         if (enables &
3552             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3553                 req.src_port = rte_cpu_to_be_16(filter->src_port);
3554         if (enables &
3555             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3556                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3557         if (enables &
3558             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3559                 req.mirror_vnic_id = filter->mirror_vnic_id;
3560
3561         req.enables = rte_cpu_to_le_32(enables);
3562
3563         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3564
3565         HWRM_CHECK_RESULT();
3566
3567         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3568         HWRM_UNLOCK();
3569
3570         return rc;
3571 }
3572
3573 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3574 {
3575         int rc = 0;
3576         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3577         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3578
3579         if (filter->fw_em_filter_id == UINT64_MAX)
3580                 return 0;
3581
        PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
3583         HWRM_PREP(req, CFA_EM_FLOW_FREE);
3584
3585         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3586
3587         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3588
3589         HWRM_CHECK_RESULT();
3590         HWRM_UNLOCK();
3591
3592         filter->fw_em_filter_id = UINT64_MAX;
3593         filter->fw_l2_filter_id = UINT64_MAX;
3594
3595         return 0;
3596 }
3597
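/*
 * Program an n-tuple flow.  In contrast to the EM key above, the
 * address/port fields and their masks are little endian here; only
 * ethertype keeps big-endian wire order.
 */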
3598 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3599                          uint16_t dst_id,
3600                          struct bnxt_filter_info *filter)
3601 {
3602         int rc = 0;
3603         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3604         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3605                                                 bp->hwrm_cmd_resp_addr;
3606         uint32_t enables = 0;
3607
3608         if (filter->fw_ntuple_filter_id != UINT64_MAX)
3609                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3610
3611         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
3612
3613         req.flags = rte_cpu_to_le_32(filter->flags);
3614
3615         enables = filter->enables |
3616               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3617         req.dst_id = rte_cpu_to_le_16(dst_id);
3618
3620         if (filter->ip_addr_type) {
3621                 req.ip_addr_type = filter->ip_addr_type;
3622                 enables |=
3623                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3624         }
3625         if (enables &
3626             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3627                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3628         if (enables &
3629             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3630                 memcpy(req.src_macaddr, filter->src_macaddr,
3631                        ETHER_ADDR_LEN);
        /*
         * Note: DST_MACADDR handling is intentionally disabled for
         * ntuple filters.
         */
3636         if (enables &
3637             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3638                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3639         if (enables &
3640             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3641                 req.ip_protocol = filter->ip_protocol;
3642         if (enables &
3643             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3644                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3645         if (enables &
3646             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3647                 req.src_ipaddr_mask[0] =
3648                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3649         if (enables &
3650             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3651                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3652         if (enables &
3653             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
                req.dst_ipaddr_mask[0] =
                        rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
3656         if (enables &
3657             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3658                 req.src_port = rte_cpu_to_le_16(filter->src_port);
3659         if (enables &
3660             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3661                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3662         if (enables &
3663             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3664                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3665         if (enables &
3666             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3667                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3668         if (enables &
3669             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3670                 req.mirror_vnic_id = filter->mirror_vnic_id;
3671
3672         req.enables = rte_cpu_to_le_32(enables);
3673
3674         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3675
3676         HWRM_CHECK_RESULT();
3677
3678         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3679         HWRM_UNLOCK();
3680
3681         return rc;
3682 }
3683
3684 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3685                                 struct bnxt_filter_info *filter)
3686 {
3687         int rc = 0;
3688         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3689         struct hwrm_cfa_ntuple_filter_free_output *resp =
3690                                                 bp->hwrm_cmd_resp_addr;
3691
3692         if (filter->fw_ntuple_filter_id == UINT64_MAX)
3693                 return 0;
3694
3695         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
3696
3697         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3698
3699         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3700
3701         HWRM_CHECK_RESULT();
3702         HWRM_UNLOCK();
3703
3704         filter->fw_ntuple_filter_id = UINT64_MAX;
3705         filter->fw_l2_filter_id = UINT64_MAX;
3706
3707         return 0;
3708 }
3709
3710 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3711 {
3712         unsigned int rss_idx, fw_idx, i;
3713
3714         if (vnic->rss_table && vnic->hash_type) {
3715                 /*
3716                  * Fill the RSS hash & redirection table with
3717                  * ring group ids for all VNICs
3718                  */
3719                 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
3720                         rss_idx++, fw_idx++) {
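                        /*
                         * Skip ring groups still marked INVALID_HW_RING_ID,
                         * wrapping fw_idx around the Rx ring count to find
                         * the next allocated group.
                         */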
3721                         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3722                                 fw_idx %= bp->rx_cp_nr_rings;
3723                                 if (vnic->fw_grp_ids[fw_idx] !=
3724                                     INVALID_HW_RING_ID)
3725                                         break;
3726                                 fw_idx++;
3727                         }
3728                         if (i == bp->rx_cp_nr_rings)
3729                                 return 0;
3730                         vnic->rss_table[rss_idx] =
3731                                 vnic->fw_grp_ids[fw_idx];
3732                 }
3733                 return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
3734         }
3735         return 0;
3736 }