net/bnxt: clear HWRM sniffer list for PFs
drivers/net/bnxt/bnxt_hwrm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT                10000
#define HWRM_VERSION_1_9_1              0x10901

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

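/*
 * Map a buffer size to the exponent of the smallest supported HWRM page
 * size (a power of two) that can contain it. Sizes above 1GB log an error
 * and fall back to the largest shift representable in a pointer.
 */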
static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e., a timeout), and a positive non-zero HWRM error code if the
 * command is rejected by the ChiMP.
 */

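/*
 * Low-level HWRM transport: copies the request into the HWRM channel in
 * BAR0 (using the short command format when the firmware requires it),
 * zero-pads the remainder of the request space, rings the doorbell at
 * offset 0x100, and polls the response buffer until its final byte carries
 * HWRM_RESP_VALID_KEY or HWRM_CMD_TIMEOUT iterations elapse. Callers must
 * hold bp->hwrm_lock, which HWRM_PREP() takes.
 */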
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                        uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };

        if (bp->flags & BNXT_FLAG_SHORT_CMD) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->max_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                /* Sync memory write before updating doorbell */
                rte_wmb();

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <=
                                bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
                        req->req_type);
                goto err_ret;
        }
        return 0;

err_ret:
        return -1;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() bails out with the error code on failure; it releases
 * the spinlock only when it returns early. If a function does not use the
 * regular int return codes, HWRM_CHECK_RESULT() should not be used
 * directly; rather, it should be copied and modified to suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(-1); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT() do {\
        if (rc) { \
                PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        PMD_DRV_LOG(ERR, \
                                "error %d:%d:%08x:%04x\n", \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        PMD_DRV_LOG(ERR, "error %d\n", rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)

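/* Reset the RX mask of the given VNIC to zero, clearing all of its
 * acceptance modes in the firmware.
 */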
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

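/*
 * Program the RX mask of a VNIC from its flags: broadcast, untagged,
 * promiscuous, all-multicast and multicast acceptance, plus an optional
 * multicast address list and VLAN table.
 */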
int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME: add the multicast flag once multicast adding options are
         * supported by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->flags & BNXT_VNIC_INFO_MCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                         rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the
         * set_rx_mask list was used for anti-spoof. In 1.8.0, the TX path
         * configuration was removed from the set_rx_mask call, and this
         * command was added.
         *
         * This command is also present in 1.7.8.11 and higher,
         * as well as in 1.7.8.0.
         */
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

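/*
 * Free the L2 filter associated with "filter" in the firmware, if one was
 * allocated, and invalidate the cached filter ID.
 */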
int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = UINT64_MAX;

        return 0;
}

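/*
 * Allocate an L2 filter in the firmware for the given destination (VNIC)
 * ID. In VMDq mode the VLAN from the pool map is added to the filter.
 * Any previously allocated filter is freed first, and the new firmware
 * filter ID is cached in "filter".
 */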
int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        //TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                PMD_DRV_LOG(DEBUG,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        HWRM_UNLOCK();

        return rc;
}

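/*
 * Enable or disable PTP RX/TX timestamp capture on the port MAC according
 * to the current bnxt_ptp_cfg state.
 */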
int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_CFG);

        if (ptp->rx_filter)
                flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables =
        rte_cpu_to_le_32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*      if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
        if (ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_PTP_QCFG);

        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS)) {
                HWRM_UNLOCK();
                return 0;
        }

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }

        ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
        ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
        ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
        ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
        ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
        ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
        ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
        ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
        ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

        /* Done with the response buffer; release the HWRM lock taken by
         * HWRM_PREP() on every exit path after the command was sent.
         */
        HWRM_UNLOCK();

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        return 0;
}

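/*
 * Query function capabilities: ring, VNIC and context limits, the default
 * MAC address, and (for PFs) per-VF state. The PF's per-VF VLAN tables are
 * (re)allocated here whenever the VF count changes.
 */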
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        int i;

        HWRM_PREP(req, FUNC_QCAPS);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                bp->pf.vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_as_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN AS table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                               bp->pf.vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
                }
        }

        HWRM_UNLOCK();

        /* Query the PTP register offsets only after releasing the HWRM
         * lock: bnxt_hwrm_ptp_qcfg() sends its own command and takes the
         * lock itself, so calling it here avoids a double unlock.
         */
        if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
                bnxt_hwrm_ptp_qcfg(bp);

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

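/*
 * Register the driver with the firmware, advertising the DPDK version,
 * requesting forwarding of VF HWRM requests to the PF (for PFs), and
 * subscribing to the async events the PMD handles.
 */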
int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));

                /*
                 * A PF can sniff HWRM API calls issued by a VF. This can be
                 * set up by the Linux driver and then inherited by the DPDK
                 * PF driver. Clear this HWRM sniffer list in the firmware
                 * because the DPDK PF driver does not support it.
                 */
                req.flags =
                rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
        }

        req.async_event_fwd[0] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
        req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
                                 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

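/*
 * Negotiate the HWRM interface version with the firmware and size the
 * request/response buffers accordingly. Also detects and sets up the
 * short command mode when the firmware requires it.
 */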
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        HWRM_PREP(req, VER_GET);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj, resp->hwrm_intf_min,
                resp->hwrm_intf_upd,
                resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
        bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
                        (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
        PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj << 16;
        fw_version |= resp->hwrm_intf_min << 8;
        fw_version |= resp->hwrm_intf_upd;
        bp->hwrm_spec_code = fw_version;

        if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        PMD_DRV_LOG(INFO,
                                "Firmware API version is newer than driver.\n");
                        PMD_DRV_LOG(INFO,
                                "The driver may be missing features.\n");
                } else {
                        PMD_DRV_LOG(INFO,
                                "Firmware API version is older than driver.\n");
                        PMD_DRV_LOG(INFO,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                PMD_DRV_LOG(ERR, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        max_resp_len = resp->max_resp_len;
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        /* Name the DMA buffers after the PCI address up front; the name is
         * also needed for the short command buffer below.
         */
        sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                bp->pdev->addr.domain, bp->pdev->addr.bus,
                bp->pdev->addr.devid, bp->pdev->addr.function);

        if (bp->max_resp_len != max_resp_len) {
                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                PMD_DRV_LOG(DEBUG, "Short command supported\n");

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr = rte_malloc(type,
                                                        bp->max_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == 0) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }

                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

error:
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

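/*
 * Apply link configuration: a forced speed with autoneg disabled, or
 * autoneg over all speeds or an advertised speed mask, plus duplex and
 * pause settings. With link_up clear, force the link down instead.
 */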
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG);

        if (conf->link_up) {
                /* Setting a fixed speed, but autoneg is on, so disable it */
                if (bp->link_info.auto_mode && conf->link_speed) {
                        req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                        PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
                }

                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        /* No speeds specified. Enable AutoNeg - all speeds */
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                }
                /* AutoNeg - advertise the specified speeds. */
                if (conf->auto_link_speed_mask &&
                    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                        req.auto_link_speed_mask =
                                conf->auto_link_speed_mask;
                        enables |=
                        HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                }

                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                PMD_DRV_LOG(INFO, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex_cfg;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
        link_info->phy_type = resp->phy_type;
        link_info->media_type = resp->media_type;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        HWRM_UNLOCK();

        PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
        PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
                    link_info->auto_link_speed_mask);
        PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
                    link_info->force_link_speed);

        return rc;
}

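/*
 * Query the CoS queue configuration for the TX path and select the CoS
 * queue ID used for TX rings: the first lossy profile on HWRM >= 1.9.1,
 * otherwise queue 0.
 */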
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int i;

        HWRM_PREP(req, QUEUE_QPORTCFG);

        req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
        /* HWRM Version >= 1.9.1 */
        if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
                req.drv_qmap_cap =
                        HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        HWRM_UNLOCK();

        if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
                bp->tx_cosq_id = bp->cos_queue[0].id;
        } else {
                /* iterate and find the COSq profile to use for Tx */
                for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
                        if (bp->cos_queue[i].profile ==
                                HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
                                bp->tx_cosq_id = bp->cos_queue[i].id;
                                break;
                        }
                }
        }
        PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);

        return rc;
}

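/*
 * Allocate a TX, RX or completion ring in the firmware and record the
 * returned firmware ring ID. TX rings are also bound to the selected CoS
 * queue, and TX/RX rings to their completion ring and stats context.
 */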
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
        int rc = 0;
        uint32_t enables = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);
        req.length = rte_cpu_to_le_32(ring->ring_size);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                break;
        default:
                PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                HWRM_UNLOCK();
                return -1;
        }
        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                default:
                        PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                HWRM_UNLOCK();

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        HWRM_UNLOCK();
        return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC);

        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        HWRM_PREP(req, STAT_CTX_CLR_STATS);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC);

        req.update_period_ms = rte_cpu_to_le_32(0);

        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

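/*
 * Allocate a VNIC in the firmware and initialize its ring-group mapping,
 * its RSS/CoS/LB rules (to "none") and its MRU derived from the current
 * MTU.
 */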
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
                vnic->start_grp_id, vnic->end_grp_id);
        for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                ETHER_CRC_LEN + VLAN_TAG_SIZE;
        HWRM_PREP(req, VNIC_ALLOC);

        if (vnic->func_default)
                req.flags =
                        rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        HWRM_UNLOCK();
        PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
                                        struct bnxt_vnic_info *vnic,
                                        struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_QCFG);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        pmode->flags = rte_le_to_cpu_32(resp->flags);
        /* dflt_vnic bit doesn't exist in the _cfg command */
        pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
        pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
        pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
        pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
                                       struct bnxt_vnic_info *vnic,
                                       struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_CFG);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.flags = rte_cpu_to_le_32(pmode->flags);
        req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
        req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
        req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
        req.enables = rte_cpu_to_le_32(
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
        );

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

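/*
 * Push the VNIC configuration (default ring group, RSS/CoS/LB rules, MRU
 * and mode flags) to the firmware, preserving the placement modes that
 * were in effect before the call.
 */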
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t ctx_enable_flag = 0;
        struct bnxt_plcmodes_cfg pmodes;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
                return rc;
        }

        rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
        if (rc)
                return rc;

        HWRM_PREP(req, VNIC_CFG);

        /* Only RSS support for now TBD: COS & LB */
        req.enables =
            rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
        if (vnic->lb_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
        if (vnic->cos_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
        if (vnic->rss_rule != 0xffff) {
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
        }
        req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
        req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
        req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
        req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
        req.mru = rte_cpu_to_le_16(vnic->mru);
        if (vnic->func_default)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
        if (vnic->vlan_strip)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
        if (vnic->bd_stall)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
        if (vnic->roce_dual)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
        if (vnic->roce_only)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
        if (vnic->rss_dflt_cr)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

        return rc;
}

1337 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1338                 int16_t fw_vf_id)
1339 {
1340         int rc = 0;
1341         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1342         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1343
1344         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1345                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1346                 return rc;
1347         }
1348         HWRM_PREP(req, VNIC_QCFG);
1349
1350         req.enables =
1351                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1352         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1353         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1354
1355         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1356
1357         HWRM_CHECK_RESULT();
1358
1359         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1360         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1361         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1362         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1363         vnic->mru = rte_le_to_cpu_16(resp->mru);
1364         vnic->func_default = rte_le_to_cpu_32(
1365                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1366         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1367                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1368         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1369                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1370         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1371                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1372         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1373                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1374         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1375                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1376
1377         HWRM_UNLOCK();
1378
1379         return rc;
1380 }
1381
1382 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1383 {
1384         int rc = 0;
1385         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1386         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1387                                                 bp->hwrm_cmd_resp_addr;
1388
1389         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
1390
1391         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1392
1393         HWRM_CHECK_RESULT();
1394
1395         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1396         HWRM_UNLOCK();
1397         PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1398
1399         return rc;
1400 }
1401
1402 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1403 {
1404         int rc = 0;
1405         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1406         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1407                                                 bp->hwrm_cmd_resp_addr;
1408
1409         if (vnic->rss_rule == INVALID_HW_RING_ID) {
1410                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1411                 return rc;
1412         }
1413         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
1414
1415         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1416
1417         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1418
1419         HWRM_CHECK_RESULT();
1420         HWRM_UNLOCK();
1421
1422         vnic->rss_rule = INVALID_HW_RING_ID;
1423
1424         return rc;
1425 }
1426
1427 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1428 {
1429         int rc = 0;
1430         struct hwrm_vnic_free_input req = {.req_type = 0 };
1431         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1432
1433         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1434                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1435                 return rc;
1436         }
1437
1438         HWRM_PREP(req, VNIC_FREE);
1439
1440         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1441
1442         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1443
1444         HWRM_CHECK_RESULT();
1445         HWRM_UNLOCK();
1446
1447         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1448         return rc;
1449 }
1450
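/*
 * Program the RSS hash type plus the DMA addresses of the indirection
 * (ring group) table and the hash key for the VNIC's RSS context.
 */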
1451 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1452                            struct bnxt_vnic_info *vnic)
1453 {
1454         int rc = 0;
1455         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1456         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1457
1458         HWRM_PREP(req, VNIC_RSS_CFG);
1459
1460         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1461
1462         req.ring_grp_tbl_addr =
1463             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1464         req.hash_key_tbl_addr =
1465             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1466         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1467
1468         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1469
1470         HWRM_CHECK_RESULT();
1471         HWRM_UNLOCK();
1472
1473         return rc;
1474 }
1475
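/*
 * Configure jumbo placement: the threshold is the data room of a single
 * Rx mbuf minus the headroom, so larger frames are placed using
 * aggregation buffers.
 */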
1476 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1477                         struct bnxt_vnic_info *vnic)
1478 {
1479         int rc = 0;
1480         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1481         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1482         uint16_t size;
1483
1484         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1485
1486         req.flags = rte_cpu_to_le_32(
1487                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1488
1489         req.enables = rte_cpu_to_le_32(
1490                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1491
1492         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1493         size -= RTE_PKTMBUF_HEADROOM;
1494
1495         req.jumbo_thresh = rte_cpu_to_le_16(size);
1496         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1497
1498         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1499
1500         HWRM_CHECK_RESULT();
1501         HWRM_UNLOCK();
1502
1503         return rc;
1504 }
1505
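/*
 * Enable or disable TPA (hardware receive aggregation) on the VNIC.
 * When enabling, allow at most 5 aggregation segments per sequence and
 * require a minimum aggregation length of 512 bytes.
 */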
1506 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1507                         struct bnxt_vnic_info *vnic, bool enable)
1508 {
1509         int rc = 0;
1510         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1511         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1512
1513         HWRM_PREP(req, VNIC_TPA_CFG);
1514
1515         if (enable) {
1516                 req.enables = rte_cpu_to_le_32(
1517                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1518                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1519                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1520                 req.flags = rte_cpu_to_le_32(
1521                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1522                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1523                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1524                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1525                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1526                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1527                 req.max_agg_segs = rte_cpu_to_le_16(5);
1528                 req.max_aggs =
1529                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1530                 req.min_agg_len = rte_cpu_to_le_32(512);
1531         }
1532         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1533
1534         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1535
1536         HWRM_CHECK_RESULT();
1537         HWRM_UNLOCK();
1538
1539         return rc;
1540 }
1541
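/* Set the default MAC address of a VF via HWRM_FUNC_CFG on the VF's fid */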
1542 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1543 {
1544         struct hwrm_func_cfg_input req = {0};
1545         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1546         int rc;
1547
1548         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1549         req.enables = rte_cpu_to_le_32(
1550                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1551         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1552         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1553
1554         HWRM_PREP(req, FUNC_CFG);
1555
1556         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1557         HWRM_CHECK_RESULT();
1558         HWRM_UNLOCK();
1559
1560         bp->pf.vf_info[vf].random_mac = false;
1561
1562         return rc;
1563 }
1564
1565 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1566                                   uint64_t *dropped)
1567 {
1568         int rc = 0;
1569         struct hwrm_func_qstats_input req = {.req_type = 0};
1570         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1571
1572         HWRM_PREP(req, FUNC_QSTATS);
1573
1574         req.fid = rte_cpu_to_le_16(fid);
1575
1576         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1577
1578         HWRM_CHECK_RESULT();
1579
1580         if (dropped)
1581                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1582
1583         HWRM_UNLOCK();
1584
1585         return rc;
1586 }
1587
1588 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1589                           struct rte_eth_stats *stats)
1590 {
1591         int rc = 0;
1592         struct hwrm_func_qstats_input req = {.req_type = 0};
1593         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1594
1595         HWRM_PREP(req, FUNC_QSTATS);
1596
1597         req.fid = rte_cpu_to_le_16(fid);
1598
1599         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1600
1601         HWRM_CHECK_RESULT();
1602
1603         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1604         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1605         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1606         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1607         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1608         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1609
1610         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1611         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1612         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1613         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1614         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1615         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1616
1617         stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
1618         stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
1619
1620         stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
1621
1622         HWRM_UNLOCK();
1623
1624         return rc;
1625 }
1626
1627 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1628 {
1629         int rc = 0;
1630         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1631         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1632
1633         HWRM_PREP(req, FUNC_CLR_STATS);
1634
1635         req.fid = rte_cpu_to_le_16(fid);
1636
1637         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1638
1639         HWRM_CHECK_RESULT();
1640         HWRM_UNLOCK();
1641
1642         return rc;
1643 }
1644
1645 /*
1646  * HWRM utility functions
1647  */
1648
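/*
 * The per-queue loops below share one index space: indices in
 * [0, rx_cp_nr_rings) refer to Rx completion rings, the remainder to Tx.
 */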
1649 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1650 {
1651         unsigned int i;
1652         int rc = 0;
1653
1654         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1655                 struct bnxt_tx_queue *txq;
1656                 struct bnxt_rx_queue *rxq;
1657                 struct bnxt_cp_ring_info *cpr;
1658
1659                 if (i >= bp->rx_cp_nr_rings) {
1660                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1661                         cpr = txq->cp_ring;
1662                 } else {
1663                         rxq = bp->rx_queues[i];
1664                         cpr = rxq->cp_ring;
1665                 }
1666
1667                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1668                 if (rc)
1669                         return rc;
1670         }
1671         return 0;
1672 }
1673
1674 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1675 {
1676         int rc;
1677         unsigned int i;
1678         struct bnxt_cp_ring_info *cpr;
1679
1680         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1682                 if (i >= bp->rx_cp_nr_rings) {
1683                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1684                 } else {
1685                         cpr = bp->rx_queues[i]->cp_ring;
1686                         bp->grp_info[i].fw_stats_ctx = -1;
1687                 }
1688                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1689                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1690                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1691                         if (rc)
1692                                 return rc;
1693                 }
1694         }
1695         return 0;
1696 }
1697
1698 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1699 {
1700         unsigned int i;
1701         int rc = 0;
1702
1703         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1704                 struct bnxt_tx_queue *txq;
1705                 struct bnxt_rx_queue *rxq;
1706                 struct bnxt_cp_ring_info *cpr;
1707
1708                 if (i >= bp->rx_cp_nr_rings) {
1709                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1710                         cpr = txq->cp_ring;
1711                 } else {
1712                         rxq = bp->rx_queues[i];
1713                         cpr = rxq->cp_ring;
1714                 }
1715
1716                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1717
1718                 if (rc)
1719                         return rc;
1720         }
1721         return rc;
1722 }
1723
1724 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1725 {
1726         uint16_t idx;
1727         uint32_t rc = 0;
1728
1729         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1731                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1732                         continue;
1733
1734                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1735
1736                 if (rc)
1737                         return rc;
1738         }
1739         return rc;
1740 }
1741
1742 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1743                                 unsigned int idx __rte_unused)
1744 {
1745         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1746
1747         bnxt_hwrm_ring_free(bp, cp_ring,
1748                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1749         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1750         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1751                         sizeof(*cpr->cp_desc_ring));
1752         cpr->cp_raw_cons = 0;
1753 }
1754
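/*
 * Free rings in the order Tx, Rx (including aggregation rings), and the
 * default completion ring last, resetting host-side ring state as we go.
 */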
1755 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1756 {
1757         unsigned int i;
1758         int rc = 0;
1759
1760         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1761                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1762                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1763                 struct bnxt_ring *ring = txr->tx_ring_struct;
1764                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1765                 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
1766
1767                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1768                         bnxt_hwrm_ring_free(bp, ring,
1769                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1770                         ring->fw_ring_id = INVALID_HW_RING_ID;
1771                         memset(txr->tx_desc_ring, 0,
1772                                         txr->tx_ring_struct->ring_size *
1773                                         sizeof(*txr->tx_desc_ring));
1774                         memset(txr->tx_buf_ring, 0,
1775                                         txr->tx_ring_struct->ring_size *
1776                                         sizeof(*txr->tx_buf_ring));
1777                         txr->tx_prod = 0;
1778                         txr->tx_cons = 0;
1779                 }
1780                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1781                         bnxt_free_cp_ring(bp, cpr, idx);
1782                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1783                 }
1784         }
1785
1786         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1787                 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1788                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1789                 struct bnxt_ring *ring = rxr->rx_ring_struct;
1790                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1791                 unsigned int idx = i + 1;
1792
1793                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1794                         bnxt_hwrm_ring_free(bp, ring,
1795                                         HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1796                         ring->fw_ring_id = INVALID_HW_RING_ID;
1797                         bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1798                         memset(rxr->rx_desc_ring, 0,
1799                                         rxr->rx_ring_struct->ring_size *
1800                                         sizeof(*rxr->rx_desc_ring));
1801                         memset(rxr->rx_buf_ring, 0,
1802                                         rxr->rx_ring_struct->ring_size *
1803                                         sizeof(*rxr->rx_buf_ring));
1804                         rxr->rx_prod = 0;
1805                 }
1806                 ring = rxr->ag_ring_struct;
1807                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1808                         bnxt_hwrm_ring_free(bp, ring,
1809                                             HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1810                         ring->fw_ring_id = INVALID_HW_RING_ID;
1811                         memset(rxr->ag_buf_ring, 0,
1812                                rxr->ag_ring_struct->ring_size *
1813                                sizeof(*rxr->ag_buf_ring));
1814                         rxr->ag_prod = 0;
1815                         bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
1816                 }
1817                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1818                         bnxt_free_cp_ring(bp, cpr, idx);
1819                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1820                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1821                 }
1822         }
1823
1824         /* Default completion ring */
1825         {
1826                 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1827
1828                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1829                         bnxt_free_cp_ring(bp, cpr, 0);
1830                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1831                 }
1832         }
1833
1834         return rc;
1835 }
1836
1837 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1838 {
1839         uint16_t i;
1840         uint32_t rc = 0;
1841
1842         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1843                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1844                 if (rc)
1845                         return rc;
1846         }
1847         return rc;
1848 }
1849
1850 void bnxt_free_hwrm_resources(struct bnxt *bp)
1851 {
1852         /* Free the response and short-command buffers */
1853         rte_free(bp->hwrm_cmd_resp_addr);
1854         rte_free(bp->hwrm_short_cmd_req_addr);
1855         bp->hwrm_cmd_resp_addr = NULL;
1856         bp->hwrm_short_cmd_req_addr = NULL;
1857         bp->hwrm_cmd_resp_dma_addr = 0;
1858         bp->hwrm_short_cmd_req_dma_addr = 0;
1859 }
1860
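/*
 * Allocate the HWRM command response buffer, lock it in memory and record
 * its IO address so firmware can DMA responses into it.
 */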
1861 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1862 {
1863         struct rte_pci_device *pdev = bp->pdev;
1864         char type[RTE_MEMZONE_NAMESIZE];
1865
1866         snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
1867                 pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1868         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1869         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1870         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1871         if (bp->hwrm_cmd_resp_addr == NULL)
1872                 return -ENOMEM;
1873         bp->hwrm_cmd_resp_dma_addr =
1874                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
1875         if (bp->hwrm_cmd_resp_dma_addr == 0) {
1876                 PMD_DRV_LOG(ERR,
1877                         "unable to map response address to physical memory\n");
1878                 return -ENOMEM;
1879         }
1880         rte_spinlock_init(&bp->hwrm_lock);
1881
1882         return 0;
1883 }
1884
1885 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1886 {
1887         struct bnxt_filter_info *filter;
1888         int rc = 0;
1889
1890         STAILQ_FOREACH(filter, &vnic->filter, next) {
1891                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1892                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1893                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1894                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1895                 else
1896                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1897                 /* Continue on failure so that every filter in the
1898                  * list is attempted; rc reflects the last one tried */
1899         }
1900         return rc;
1901 }
1902
1903 static int
1904 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1905 {
1906         struct bnxt_filter_info *filter;
1907         struct rte_flow *flow;
1908         int rc = 0;
1909
1910         STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1911                 filter = flow->filter;
1912                 PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
1913                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1914                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1915                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1916                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1917                 else
1918                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1919
1920                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
1921                 rte_free(flow);
1922                 /* Continue on failure so that every flow in the
1923                  * list is removed; rc reflects the last one tried */
1924         }
1925         return rc;
1926 }
1927
1928 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1929 {
1930         struct bnxt_filter_info *filter;
1931         int rc = 0;
1932
1933         STAILQ_FOREACH(filter, &vnic->filter, next) {
1934                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1935                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
1936                                                      filter);
1937                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1938                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
1939                                                          filter);
1940                 else
1941                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
1942                                                      filter);
1943                 if (rc)
1944                         break;
1945         }
1946         return rc;
1947 }
1948
1949 void bnxt_free_tunnel_ports(struct bnxt *bp)
1950 {
1951         if (bp->vxlan_port_cnt)
1952                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1953                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1954         bp->vxlan_port = 0;
1955         if (bp->geneve_port_cnt)
1956                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1957                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1958         bp->geneve_port = 0;
1959 }
1960
1961 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1962 {
1963         int i;
1964
1965         if (bp->vnic_info == NULL)
1966                 return;
1967
1968         /*
1969          * Cleanup VNICs in reverse order, to make sure the L2 filter
1970          * from vnic0 is last to be cleaned up.
1971          */
1972         for (i = bp->nr_vnics - 1; i >= 0; i--) {
1973                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1974
1975                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
1976
1977                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1978
1979                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1980
1981                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1982
1983                 bnxt_hwrm_vnic_free(bp, vnic);
1984         }
1985         /* Ring resources */
1986         bnxt_free_all_hwrm_rings(bp);
1987         bnxt_free_all_hwrm_ring_grps(bp);
1988         bnxt_free_all_hwrm_stat_ctxs(bp);
1989         bnxt_free_tunnel_ports(bp);
1990 }
1991
1992 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1993 {
1994         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1995
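        /* ETH_LINK_SPEED_AUTONEG is 0, so this checks that FIXED is clear */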
1996         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1997                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1998
1999         switch (conf_link_speed) {
2000         case ETH_LINK_SPEED_10M_HD:
2001         case ETH_LINK_SPEED_100M_HD:
2002                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2003         }
2004         return hw_link_duplex;
2005 }
2006
2007 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2008 {
2009         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2010 }
2011
2012 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2013 {
2014         uint16_t eth_link_speed = 0;
2015
2016         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2017                 return ETH_LINK_SPEED_AUTONEG;
2018
2019         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2020         case ETH_LINK_SPEED_100M:
2021         case ETH_LINK_SPEED_100M_HD:
2022                 eth_link_speed =
2023                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2024                 break;
2025         case ETH_LINK_SPEED_1G:
2026                 eth_link_speed =
2027                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2028                 break;
2029         case ETH_LINK_SPEED_2_5G:
2030                 eth_link_speed =
2031                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2032                 break;
2033         case ETH_LINK_SPEED_10G:
2034                 eth_link_speed =
2035                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2036                 break;
2037         case ETH_LINK_SPEED_20G:
2038                 eth_link_speed =
2039                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2040                 break;
2041         case ETH_LINK_SPEED_25G:
2042                 eth_link_speed =
2043                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2044                 break;
2045         case ETH_LINK_SPEED_40G:
2046                 eth_link_speed =
2047                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2048                 break;
2049         case ETH_LINK_SPEED_50G:
2050                 eth_link_speed =
2051                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2052                 break;
2053         case ETH_LINK_SPEED_100G:
2054                 eth_link_speed =
2055                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2056                 break;
2057         default:
2058                 PMD_DRV_LOG(ERR,
2059                         "Unsupported link speed %d; default to AUTO\n",
2060                         conf_link_speed);
2061                 break;
2062         }
2063         return eth_link_speed;
2064 }
2065
2066 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2067                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2068                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2069                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2070
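/*
 * Validate the requested link speed: a fixed speed must name exactly one
 * supported speed (the (x & (x - 1)) test rejects multi-bit masks), while
 * an autoneg mask must advertise at least one supported speed.
 */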
2071 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2072 {
2073         uint32_t one_speed;
2074
2075         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2076                 return 0;
2077
2078         if (link_speed & ETH_LINK_SPEED_FIXED) {
2079                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2080
2081                 if (one_speed & (one_speed - 1)) {
2082                         PMD_DRV_LOG(ERR,
2083                                 "Invalid advertised speeds (%u) for port %u\n",
2084                                 link_speed, port_id);
2085                         return -EINVAL;
2086                 }
2087                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2088                         PMD_DRV_LOG(ERR,
2089                                 "Unsupported advertised speed (%u) for port %u\n",
2090                                 link_speed, port_id);
2091                         return -EINVAL;
2092                 }
2093         } else {
2094                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2095                         PMD_DRV_LOG(ERR,
2096                                 "Unsupported advertised speeds (%u) for port %u\n",
2097                                 link_speed, port_id);
2098                         return -EINVAL;
2099                 }
2100         }
2101         return 0;
2102 }
2103
2104 static uint16_t
2105 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2106 {
2107         uint16_t ret = 0;
2108
2109         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2110                 if (bp->link_info.support_speeds)
2111                         return bp->link_info.support_speeds;
2112                 link_speed = BNXT_SUPPORTED_SPEEDS;
2113         }
2114
2115         if (link_speed & ETH_LINK_SPEED_100M)
2116                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2117         if (link_speed & ETH_LINK_SPEED_100M_HD)
2118                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2119         if (link_speed & ETH_LINK_SPEED_1G)
2120                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2121         if (link_speed & ETH_LINK_SPEED_2_5G)
2122                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2123         if (link_speed & ETH_LINK_SPEED_10G)
2124                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2125         if (link_speed & ETH_LINK_SPEED_20G)
2126                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2127         if (link_speed & ETH_LINK_SPEED_25G)
2128                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2129         if (link_speed & ETH_LINK_SPEED_40G)
2130                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2131         if (link_speed & ETH_LINK_SPEED_50G)
2132                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2133         if (link_speed & ETH_LINK_SPEED_100G)
2134                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2135         return ret;
2136 }
2137
2138 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2139 {
2140         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2141
2142         switch (hw_link_speed) {
2143         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2144                 eth_link_speed = ETH_SPEED_NUM_100M;
2145                 break;
2146         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2147                 eth_link_speed = ETH_SPEED_NUM_1G;
2148                 break;
2149         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2150                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2151                 break;
2152         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2153                 eth_link_speed = ETH_SPEED_NUM_10G;
2154                 break;
2155         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2156                 eth_link_speed = ETH_SPEED_NUM_20G;
2157                 break;
2158         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2159                 eth_link_speed = ETH_SPEED_NUM_25G;
2160                 break;
2161         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2162                 eth_link_speed = ETH_SPEED_NUM_40G;
2163                 break;
2164         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2165                 eth_link_speed = ETH_SPEED_NUM_50G;
2166                 break;
2167         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2168                 eth_link_speed = ETH_SPEED_NUM_100G;
2169                 break;
2170         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2171         default:
2172                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2173                         hw_link_speed);
2174                 break;
2175         }
2176         return eth_link_speed;
2177 }
2178
2179 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2180 {
2181         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2182
2183         switch (hw_link_duplex) {
2184         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2185         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2186                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2187                 break;
2188         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2189                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2190                 break;
2191         default:
2192                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2193                         hw_link_duplex);
2194                 break;
2195         }
2196         return eth_link_duplex;
2197 }
2198
2199 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2200 {
2201         int rc = 0;
2202         struct bnxt_link_info *link_info = &bp->link_info;
2203
2204         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2205         if (rc) {
2206                 PMD_DRV_LOG(ERR,
2207                         "Get link config failed with rc %d\n", rc);
2208                 goto exit;
2209         }
2210         if (link_info->link_speed)
2211                 link->link_speed =
2212                         bnxt_parse_hw_link_speed(link_info->link_speed);
2213         else
2214                 link->link_speed = ETH_SPEED_NUM_NONE;
2215         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2216         link->link_status = link_info->link_up;
2217         link->link_autoneg = link_info->auto_mode ==
2218                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2219                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2220 exit:
2221         return rc;
2222 }
2223
2224 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2225 {
2226         int rc = 0;
2227         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2228         struct bnxt_link_info link_req;
2229         uint16_t speed, autoneg;
2230
2231         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2232                 return 0;
2233
2234         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2235                         bp->eth_dev->data->port_id);
2236         if (rc)
2237                 goto error;
2238
2239         memset(&link_req, 0, sizeof(link_req));
2240         link_req.link_up = link_up;
2241         if (!link_up)
2242                 goto port_phy_cfg;
2243
2244         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2245         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2246         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2247         /* Autoneg can be done only when the FW allows */
2248         if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2249                                 bp->link_info.force_link_speed)) {
2250                 link_req.phy_flags |=
2251                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2252                 link_req.auto_link_speed_mask =
2253                         bnxt_parse_eth_link_speed_mask(bp,
2254                                                        dev_conf->link_speeds);
2255         } else {
2256                 if (bp->link_info.phy_type ==
2257                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2258                     bp->link_info.phy_type ==
2259                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2260                     bp->link_info.media_type ==
2261                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2262                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2263                         return -EINVAL;
2264                 }
2265
2266                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2267                 /* If user wants a particular speed try that first. */
2268                 if (speed)
2269                         link_req.link_speed = speed;
2270                 else if (bp->link_info.force_link_speed)
2271                         link_req.link_speed = bp->link_info.force_link_speed;
2272                 else
2273                         link_req.link_speed = bp->link_info.auto_link_speed;
2274         }
2275         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2276         link_req.auto_pause = bp->link_info.auto_pause;
2277         link_req.force_pause = bp->link_info.force_pause;
2278
2279 port_phy_cfg:
2280         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2281         if (rc) {
2282                 PMD_DRV_LOG(ERR,
2283                         "Set link config failed with rc %d\n", rc);
2284         }
2285
2286 error:
2287         return rc;
2288 }
2289
2290 /* JIRA 22088 */
2291 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2292 {
2293         struct hwrm_func_qcfg_input req = {0};
2294         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2295         uint16_t flags;
2296         int rc = 0;
2297
2298         HWRM_PREP(req, FUNC_QCFG);
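        /* A fid of 0xffff refers to the function issuing the command */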
2299         req.fid = rte_cpu_to_le_16(0xffff);
2300
2301         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2302
2303         HWRM_CHECK_RESULT();
2304
2305         /* VLAN ID is the low 12 bits of the field (0xfff mask) */
2306         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2307         flags = rte_le_to_cpu_16(resp->flags);
2308         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2309                 bp->flags |= BNXT_FLAG_MULTI_HOST;
2310
2311         switch (resp->port_partition_type) {
2312         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2313         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2314         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2315                 bp->port_partition_type = resp->port_partition_type;
2316                 break;
2317         default:
2318                 bp->port_partition_type = 0;
2319                 break;
2320         }
2321
2322         HWRM_UNLOCK();
2323
2324         return rc;
2325 }
2326
2327 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2328                                    struct hwrm_func_qcaps_output *qcaps)
2329 {
2330         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2331         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2332                sizeof(qcaps->mac_address));
2333         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2334         qcaps->max_rx_rings = fcfg->num_rx_rings;
2335         qcaps->max_tx_rings = fcfg->num_tx_rings;
2336         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2337         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2338         qcaps->max_vfs = 0;
2339         qcaps->first_vf_id = 0;
2340         qcaps->max_vnics = fcfg->num_vnics;
2341         qcaps->max_decap_records = 0;
2342         qcaps->max_encap_records = 0;
2343         qcaps->max_tx_wm_flows = 0;
2344         qcaps->max_tx_em_flows = 0;
2345         qcaps->max_rx_wm_flows = 0;
2346         qcaps->max_rx_em_flows = 0;
2347         qcaps->max_flow_id = 0;
2348         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2349         qcaps->max_sp_tx_rings = 0;
2350         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2351 }
2352
2353 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2354 {
2355         struct hwrm_func_cfg_input req = {0};
2356         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2357         int rc;
2358
2359         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2360                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2361                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2362                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2363                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2364                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2365                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2366                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2367                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2368                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2369         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2370         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2371         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2372                                    ETHER_CRC_LEN + VLAN_TAG_SIZE *
2373                                    BNXT_NUM_VLANS);
2374         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2375         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2376         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2377         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2378         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2379         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2380         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2381         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2382         req.fid = rte_cpu_to_le_16(0xffff);
2383
2384         HWRM_PREP(req, FUNC_CFG);
2385
2386         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2387
2388         HWRM_CHECK_RESULT();
2389         HWRM_UNLOCK();
2390
2391         return rc;
2392 }
2393
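/*
 * Split the PF's resource maxima evenly across num_vfs + 1 functions
 * (the VFs plus the PF itself) when building the per-VF FUNC_CFG request.
 */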
2394 static void populate_vf_func_cfg_req(struct bnxt *bp,
2395                                      struct hwrm_func_cfg_input *req,
2396                                      int num_vfs)
2397 {
2398         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2399                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2400                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2401                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2402                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2403                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2404                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2405                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2406                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2407                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2408
2409         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2410                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2411                                     BNXT_NUM_VLANS);
2412         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2413                                     ETHER_CRC_LEN + VLAN_TAG_SIZE *
2414                                     BNXT_NUM_VLANS);
2415         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2416                                                 (num_vfs + 1));
2417         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2418         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2419                                                (num_vfs + 1));
2420         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2421         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2422         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2423         /* TODO: For now, do not support VMDq/RFS on VFs. */
2424         req->num_vnics = rte_cpu_to_le_16(1);
2425         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2426                                                  (num_vfs + 1));
2427 }
2428
2429 static void add_random_mac_if_needed(struct bnxt *bp,
2430                                      struct hwrm_func_cfg_input *cfg_req,
2431                                      int vf)
2432 {
2433         struct ether_addr mac;
2434
2435         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2436                 return;
2437
2438         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2439                 cfg_req->enables |=
2440                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2441                 eth_random_addr(cfg_req->dflt_mac_addr);
2442                 bp->pf.vf_info[vf].random_mac = true;
2443         } else {
2444                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2445         }
2446 }
2447
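/*
 * Query the resources firmware actually granted to a VF and subtract them
 * from the PF's remaining maxima; falls back to the requested values if
 * the query fails.
 */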
2448 static void reserve_resources_from_vf(struct bnxt *bp,
2449                                       struct hwrm_func_cfg_input *cfg_req,
2450                                       int vf)
2451 {
2452         struct hwrm_func_qcaps_input req = {0};
2453         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2454         int rc;
2455
2456         /* Get the actual allocated values now */
2457         HWRM_PREP(req, FUNC_QCAPS);
2458         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2459         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2460
2461         if (rc) {
2462                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2463                 copy_func_cfg_to_qcaps(cfg_req, resp);
2464         } else if (resp->error_code) {
2465                 rc = rte_le_to_cpu_16(resp->error_code);
2466                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2467                 copy_func_cfg_to_qcaps(cfg_req, resp);
2468         }
2469
2470         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2471         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2472         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2473         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2474         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2475         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2476         /*
2477          * TODO: While not supporting VMDq with VFs, max_vnics is always
2478          * forced to 1 in this case
2479          */
2480         //bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2481         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2482
2483         HWRM_UNLOCK();
2484 }
2485
2486 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2487 {
2488         struct hwrm_func_qcfg_input req = {0};
2489         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2490         int rc;
2491
2492         /* Check for zero MAC address */
2493         HWRM_PREP(req, FUNC_QCFG);
2494         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2495         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2496         if (rc) {
2497                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
2498                 return -1;
2499         } else if (resp->error_code) {
2500                 rc = rte_le_to_cpu_16(resp->error_code);
2501                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
2502                 return -1;
2503         }
2504         rc = rte_le_to_cpu_16(resp->vlan);
2505
2506         HWRM_UNLOCK();
2507
2508         return rc;
2509 }
2510
2511 static int update_pf_resource_max(struct bnxt *bp)
2512 {
2513         struct hwrm_func_qcfg_input req = {0};
2514         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2515         int rc;
2516
2517         /* And copy the allocated numbers into the pf struct */
2518         HWRM_PREP(req, FUNC_QCFG);
2519         req.fid = rte_cpu_to_le_16(0xffff);
2520         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2521         HWRM_CHECK_RESULT();
2522
2523         /* Only TX ring value reflects actual allocation? TODO */
2524         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2525         bp->pf.evb_mode = resp->evb_mode;
2526
2527         HWRM_UNLOCK();
2528
2529         return rc;
2530 }
2531
2532 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2533 {
2534         int rc;
2535
2536         if (!BNXT_PF(bp)) {
2537                 PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
2538                 return -1;
2539         }
2540
2541         rc = bnxt_hwrm_func_qcaps(bp);
2542         if (rc)
2543                 return rc;
2544
2545         bp->pf.func_cfg_flags &=
2546                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2547                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2548         bp->pf.func_cfg_flags |=
2549                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2550         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2551         return rc;
2552 }
2553
2554 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2555 {
2556         struct hwrm_func_cfg_input req = {0};
2557         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2558         int i;
2559         size_t sz;
2560         int rc = 0;
2561         size_t req_buf_sz;
2562
2563         if (!BNXT_PF(bp)) {
2564                 PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
2565                 return -1;
2566         }
2567
2568         rc = bnxt_hwrm_func_qcaps(bp);
2569
2570         if (rc)
2571                 return rc;
2572
2573         bp->pf.active_vfs = num_vfs;
2574
2575         /*
2576          * First, configure the PF to only use one TX ring.  This ensures that
2577          * there are enough rings for all VFs.
2578          *
2579          * If we don't do this, when we call func_alloc() later, we will lock
2580          * extra rings to the PF that won't be available during func_cfg() of
2581          * the VFs.
2582          *
2583          * This has been fixed with firmware versions above 20.6.54.
2584          */
2585         bp->pf.func_cfg_flags &=
2586                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2587                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2588         bp->pf.func_cfg_flags |=
2589                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2590         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2591         if (rc)
2592                 return rc;
2593
2594         /*
2595          * Now, create and register a buffer to hold forwarded VF requests
2596          */
2597         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2598         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2599                 page_roundup(req_buf_sz));
2600         if (bp->pf.vf_req_buf == NULL) {
2601                 rc = -ENOMEM;
2602                 goto error_free;
2603         }
2604         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2605                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2606         for (i = 0; i < num_vfs; i++)
2607                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2608                                         (i * HWRM_MAX_REQ_LEN);
2609
2610         rc = bnxt_hwrm_func_buf_rgtr(bp);
2611         if (rc)
2612                 goto error_free;
2613
2614         populate_vf_func_cfg_req(bp, &req, num_vfs);
2615
2616         bp->pf.active_vfs = 0;
2617         for (i = 0; i < num_vfs; i++) {
2618                 add_random_mac_if_needed(bp, &req, i);
2619
2620                 HWRM_PREP(req, FUNC_CFG);
2621                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2622                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2623                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2624
2625                 /* Clear enable flag for next pass */
2626                 req.enables &= ~rte_cpu_to_le_32(
2627                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2628
2629                 if (rc || resp->error_code) {
2630                         PMD_DRV_LOG(ERR,
2631                                 "Failed to initizlie VF %d\n", i);
2632                         PMD_DRV_LOG(ERR,
2633                                 "Not all VFs available. (%d, %d)\n",
2634                                 rc, resp->error_code);
2635                         HWRM_UNLOCK();
2636                         break;
2637                 }
2638
2639                 HWRM_UNLOCK();
2640
2641                 reserve_resources_from_vf(bp, &req, i);
2642                 bp->pf.active_vfs++;
2643                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2644         }
2645
2646         /*
2647          * Now configure the PF to use "the rest" of the resources.
2648          * STD_TX_RING_MODE is used here even though it limits the TX
2649          * rings, because it allows QoS to function properly.  Without
2650          * it, the PF rings would break bandwidth settings.
2651          */
2652         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2653         if (rc)
2654                 goto error_free;
2655
2656         rc = update_pf_resource_max(bp);
2657         if (rc)
2658                 goto error_free;
2659
2660         return rc;
2661
2662 error_free:
2663         bnxt_hwrm_func_buf_unrgtr(bp);
2664         return rc;
2665 }
2666
2667 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2668 {
2669         struct hwrm_func_cfg_input req = {0};
2670         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2671         int rc;
2672
2673         HWRM_PREP(req, FUNC_CFG);
2674
2675         req.fid = rte_cpu_to_le_16(0xffff);
2676         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2677         req.evb_mode = bp->pf.evb_mode;
2678
2679         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2680         HWRM_CHECK_RESULT();
2681         HWRM_UNLOCK();
2682
2683         return rc;
2684 }
2685
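/*
 * Register a VXLAN or Geneve destination UDP port with firmware and cache
 * the returned firmware port id for later freeing.
 */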
2686 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2687                                 uint8_t tunnel_type)
2688 {
2689         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2690         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2691         int rc = 0;
2692
2693         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2694         req.tunnel_type = tunnel_type;
2695         req.tunnel_dst_port_val = port;
2696         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2697         HWRM_CHECK_RESULT();
2698
2699         switch (tunnel_type) {
2700         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2701                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2702                 bp->vxlan_port = port;
2703                 break;
2704         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2705                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2706                 bp->geneve_port = port;
2707                 break;
2708         default:
2709                 break;
2710         }
2711
2712         HWRM_UNLOCK();
2713
2714         return rc;
2715 }
2716
2717 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2718                                 uint8_t tunnel_type)
2719 {
2720         struct hwrm_tunnel_dst_port_free_input req = {0};
2721         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2722         int rc = 0;
2723
2724         HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2725
2726         req.tunnel_type = tunnel_type;
2727         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2728         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2729
2730         HWRM_CHECK_RESULT();
2731         HWRM_UNLOCK();
2732
2733         return rc;
2734 }
2735
2736 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2737                                         uint32_t flags)
2738 {
2739         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2740         struct hwrm_func_cfg_input req = {0};
2741         int rc;
2742
2743         HWRM_PREP(req, FUNC_CFG);
2744
2745         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2746         req.flags = rte_cpu_to_le_32(flags);
2747         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2748
2749         HWRM_CHECK_RESULT();
2750         HWRM_UNLOCK();
2751
2752         return rc;
2753 }
2754
2755 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2756 {
2757         uint32_t *flag = flagp;
2758
2759         vnic->flags = *flag;
2760 }
2761
2762 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2763 {
2764         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2765 }
2766
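/*
 * Register the VF request-forwarding buffer with firmware.  The page size
 * is encoded as a log2 value via page_getenum().
 */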
2767 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2768 {
2769         int rc = 0;
2770         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2771         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2772
2773         HWRM_PREP(req, FUNC_BUF_RGTR);
2774
2775         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2776         req.req_buf_page_size = rte_cpu_to_le_16(
2777                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2778         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2779         req.req_buf_page_addr[0] =
2780                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
        if (req.req_buf_page_addr[0] == 0) {
                HWRM_UNLOCK();
                PMD_DRV_LOG(ERR,
                        "unable to map buffer address to physical memory\n");
                return -ENOMEM;
        }
2786
2787         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2788
2789         HWRM_CHECK_RESULT();
2790         HWRM_UNLOCK();
2791
2792         return rc;
2793 }
2794
2795 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2796 {
2797         int rc = 0;
2798         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2799         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2800
2801         HWRM_PREP(req, FUNC_BUF_UNRGTR);
2802
2803         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2804
2805         HWRM_CHECK_RESULT();
2806         HWRM_UNLOCK();
2807
2808         return rc;
2809 }
2810
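/*
 * Point the firmware's async event notifications at the default completion
 * ring.  The PF variant uses HWRM_FUNC_CFG with fid == 0xffff (self); the
 * VF variant below uses HWRM_FUNC_VF_CFG instead.
 */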
2811 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2812 {
2813         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2814         struct hwrm_func_cfg_input req = {0};
2815         int rc;
2816
2817         HWRM_PREP(req, FUNC_CFG);
2818
2819         req.fid = rte_cpu_to_le_16(0xffff);
2820         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2821         req.enables = rte_cpu_to_le_32(
2822                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2823         req.async_event_cr = rte_cpu_to_le_16(
2824                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2825         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2826
2827         HWRM_CHECK_RESULT();
2828         HWRM_UNLOCK();
2829
2830         return rc;
2831 }
2832
2833 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2834 {
2835         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2836         struct hwrm_func_vf_cfg_input req = {0};
2837         int rc;
2838
2839         HWRM_PREP(req, FUNC_VF_CFG);
2840
2841         req.enables = rte_cpu_to_le_32(
2842                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2843         req.async_event_cr = rte_cpu_to_le_16(
2844                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2845         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2846
2847         HWRM_CHECK_RESULT();
2848         HWRM_UNLOCK();
2849
2850         return rc;
2851 }
2852
2853 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2854 {
2855         struct hwrm_func_cfg_input req = {0};
2856         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2857         uint16_t dflt_vlan, fid;
2858         uint32_t func_cfg_flags;
2859         int rc = 0;
2860
2861         HWRM_PREP(req, FUNC_CFG);
2862
2863         if (is_vf) {
2864                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2865                 fid = bp->pf.vf_info[vf].fid;
2866                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2867         } else {
                fid = 0xffff;
2869                 func_cfg_flags = bp->pf.func_cfg_flags;
2870                 dflt_vlan = bp->vlan;
2871         }
2872
2873         req.flags = rte_cpu_to_le_32(func_cfg_flags);
2874         req.fid = rte_cpu_to_le_16(fid);
2875         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2876         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2877
2878         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2879
2880         HWRM_CHECK_RESULT();
2881         HWRM_UNLOCK();
2882
2883         return rc;
2884 }
2885
2886 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2887                         uint16_t max_bw, uint16_t enables)
2888 {
2889         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2890         struct hwrm_func_cfg_input req = {0};
2891         int rc;
2892
2893         HWRM_PREP(req, FUNC_CFG);
2894
2895         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2896         req.enables |= rte_cpu_to_le_32(enables);
2897         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2898         req.max_bw = rte_cpu_to_le_32(max_bw);
2899         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2900
2901         HWRM_CHECK_RESULT();
2902         HWRM_UNLOCK();
2903
2904         return rc;
2905 }
2906
2907 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2908 {
2909         struct hwrm_func_cfg_input req = {0};
2910         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2911         int rc = 0;
2912
2913         HWRM_PREP(req, FUNC_CFG);
2914
2915         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2916         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2917         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2918         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2919
2920         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2921
2922         HWRM_CHECK_RESULT();
2923         HWRM_UNLOCK();
2924
2925         return rc;
2926 }
2927
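/*
 * VF HWRM requests arrive at the PF through the buffer registered in
 * bnxt_hwrm_func_buf_rgtr().  For each such encapsulated request the PF
 * either replays it to the firmware with HWRM_EXEC_FWD_RESP or rejects it
 * with HWRM_REJECT_FWD_RESP, as below.
 */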
2928 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2929                               void *encaped, size_t ec_size)
2930 {
2931         int rc = 0;
2932         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2933         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2934
2935         if (ec_size > sizeof(req.encap_request))
2936                 return -1;
2937
2938         HWRM_PREP(req, REJECT_FWD_RESP);
2939
2940         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2941         memcpy(req.encap_request, encaped, ec_size);
2942
2943         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2944
2945         HWRM_CHECK_RESULT();
2946         HWRM_UNLOCK();
2947
2948         return rc;
2949 }
2950
2951 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2952                                        struct ether_addr *mac)
2953 {
2954         struct hwrm_func_qcfg_input req = {0};
2955         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2956         int rc;
2957
2958         HWRM_PREP(req, FUNC_QCFG);
2959
2960         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2961         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2962
2963         HWRM_CHECK_RESULT();
2964
2965         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2966
2967         HWRM_UNLOCK();
2968
2969         return rc;
2970 }
2971
2972 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2973                             void *encaped, size_t ec_size)
2974 {
2975         int rc = 0;
2976         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2977         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2978
2979         if (ec_size > sizeof(req.encap_request))
2980                 return -1;
2981
2982         HWRM_PREP(req, EXEC_FWD_RESP);
2983
2984         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2985         memcpy(req.encap_request, encaped, ec_size);
2986
2987         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2988
2989         HWRM_CHECK_RESULT();
2990         HWRM_UNLOCK();
2991
2992         return rc;
2993 }
2994
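/*
 * Fold one firmware stat context into the per-queue counters of
 * rte_eth_stats.  Roughly, for an rx context:
 *
 *	q_ipackets[idx] = ucast + mcast + bcast packets
 *	q_ibytes[idx]   = ucast + mcast + bcast bytes
 *	q_errors[idx]   = error + drop packets
 *
 * with the equivalent output counters filled for a tx context.
 */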
2995 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2996                          struct rte_eth_stats *stats, uint8_t rx)
2997 {
2998         int rc = 0;
2999         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3000         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3001
3002         HWRM_PREP(req, STAT_CTX_QUERY);
3003
3004         req.stat_ctx_id = rte_cpu_to_le_32(cid);
3005
3006         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3007
3008         HWRM_CHECK_RESULT();
3009
3010         if (rx) {
3011                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3012                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3013                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3014                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3015                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3016                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3017                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3018                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3019         } else {
3020                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3021                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3022                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3023                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3024                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3025                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3026                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
3027         }
3028
3030         HWRM_UNLOCK();
3031
3032         return rc;
3033 }
3034
3035 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3036 {
3037         struct hwrm_port_qstats_input req = {0};
3038         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3039         struct bnxt_pf_info *pf = &bp->pf;
3040         int rc;
3041
3042         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3043                 return 0;
3044
3045         HWRM_PREP(req, PORT_QSTATS);
3046
3047         req.port_id = rte_cpu_to_le_16(pf->port_id);
3048         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3049         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3050         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3051
3052         HWRM_CHECK_RESULT();
3053         HWRM_UNLOCK();
3054
3055         return rc;
3056 }
3057
3058 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3059 {
3060         struct hwrm_port_clr_stats_input req = {0};
3061         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3062         struct bnxt_pf_info *pf = &bp->pf;
3063         int rc;
3064
3065         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3066                 return 0;
3067
3068         HWRM_PREP(req, PORT_CLR_STATS);
3069
3070         req.port_id = rte_cpu_to_le_16(pf->port_id);
3071         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3072
3073         HWRM_CHECK_RESULT();
3074         HWRM_UNLOCK();
3075
3076         return rc;
3077 }
3078
3079 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3080 {
3081         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3082         struct hwrm_port_led_qcaps_input req = {0};
3083         int rc;
3084
3085         if (BNXT_VF(bp))
3086                 return 0;
3087
3088         HWRM_PREP(req, PORT_LED_QCAPS);
3089         req.port_id = bp->pf.port_id;
3090         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3091
3092         HWRM_CHECK_RESULT();
3093
3094         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3095                 unsigned int i;
3096
3097                 bp->num_leds = resp->num_leds;
3098                 memcpy(bp->leds, &resp->led0_id,
3099                         sizeof(bp->leds[0]) * bp->num_leds);
3100                 for (i = 0; i < bp->num_leds; i++) {
3101                         struct bnxt_led_info *led = &bp->leds[i];
3102
3103                         uint16_t caps = led->led_state_caps;
3104
3105                         if (!led->led_group_id ||
3106                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3107                                 bp->num_leds = 0;
3108                                 break;
3109                         }
3110                 }
3111         }
3112
3113         HWRM_UNLOCK();
3114
3115         return rc;
3116 }
3117
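/*
 * Drive the port LEDs discovered by bnxt_hwrm_port_led_qcaps().  When
 * led_on is true, every LED is put into BLINKALT state with a 500ms
 * on/off period (e.g. for port identification); otherwise the LEDs are
 * returned to their default state.
 */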
3118 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3119 {
3120         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3121         struct hwrm_port_led_cfg_input req = {0};
3122         struct bnxt_led_cfg *led_cfg;
3123         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3124         uint16_t duration = 0;
3125         int rc, i;
3126
3127         if (!bp->num_leds || BNXT_VF(bp))
3128                 return -EOPNOTSUPP;
3129
3130         HWRM_PREP(req, PORT_LED_CFG);
3131
3132         if (led_on) {
3133                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3134                 duration = rte_cpu_to_le_16(500);
3135         }
3136         req.port_id = bp->pf.port_id;
3137         req.num_leds = bp->num_leds;
3138         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3139         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3140                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3141                 led_cfg->led_id = bp->leds[i].led_id;
3142                 led_cfg->led_state = led_state;
3143                 led_cfg->led_blink_on = duration;
3144                 led_cfg->led_blink_off = duration;
3145                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3146         }
3147
3148         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3149
3150         HWRM_CHECK_RESULT();
3151         HWRM_UNLOCK();
3152
3153         return rc;
3154 }
3155
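/*
 * NVM helpers.  The flash directory is described by an entry count and a
 * fixed per-entry length, so a full directory read needs a staging buffer
 * of entries * entry_length bytes, as computed in
 * bnxt_get_nvram_directory() below.
 */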
3156 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3157                                uint32_t *length)
3158 {
3159         int rc;
3160         struct hwrm_nvm_get_dir_info_input req = {0};
3161         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3162
3163         HWRM_PREP(req, NVM_GET_DIR_INFO);
3164
3165         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3166
3167         HWRM_CHECK_RESULT();
3168         HWRM_UNLOCK();
3169
3170         if (!rc) {
3171                 *entries = rte_le_to_cpu_32(resp->entries);
3172                 *length = rte_le_to_cpu_32(resp->entry_length);
3173         }
3174         return rc;
3175 }
3176
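/*
 * Note: the first two bytes of the caller's buffer receive the directory
 * entry count and entry length (each truncated to 8 bits), followed by
 * the raw directory entries; any remaining space is filled with 0xff.
 */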
3177 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3178 {
3179         int rc;
3180         uint32_t dir_entries;
3181         uint32_t entry_length;
3182         uint8_t *buf;
3183         size_t buflen;
3184         rte_iova_t dma_handle;
3185         struct hwrm_nvm_get_dir_entries_input req = {0};
3186         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3187
3188         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3189         if (rc != 0)
3190                 return rc;
3191
3192         *data++ = dir_entries;
3193         *data++ = entry_length;
3194         len -= 2;
3195         memset(data, 0xff, len);
3196
3197         buflen = dir_entries * entry_length;
        buf = rte_malloc("nvm_dir", buflen, 0);
        if (buf == NULL)
                return -ENOMEM;
        rte_mem_lock_page(buf);

        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                rte_free(buf);
                return -ENOMEM;
        }
3208         HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
3209         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3210         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3211
3212         HWRM_CHECK_RESULT();
3213         HWRM_UNLOCK();
3214
3215         if (rc == 0)
3216                 memcpy(data, buf, len > buflen ? buflen : len);
3217
3218         rte_free(buf);
3219
3220         return rc;
3221 }
3222
3223 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3224                              uint32_t offset, uint32_t length,
3225                              uint8_t *data)
3226 {
3227         int rc;
3228         uint8_t *buf;
3229         rte_iova_t dma_handle;
3230         struct hwrm_nvm_read_input req = {0};
3231         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3232
        buf = rte_malloc("nvm_item", length, 0);
        if (!buf)
                return -ENOMEM;
        rte_mem_lock_page(buf);

        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                PMD_DRV_LOG(ERR,
                        "unable to map response address to physical memory\n");
                rte_free(buf);
                return -ENOMEM;
        }
3244         HWRM_PREP(req, NVM_READ);
3245         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3246         req.dir_idx = rte_cpu_to_le_16(index);
3247         req.offset = rte_cpu_to_le_32(offset);
3248         req.len = rte_cpu_to_le_32(length);
3249         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3250         HWRM_CHECK_RESULT();
3251         HWRM_UNLOCK();
3252         if (rc == 0)
3253                 memcpy(data, buf, length);
3254
3255         rte_free(buf);
3256         return rc;
3257 }
3258
3259 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3260 {
3261         int rc;
3262         struct hwrm_nvm_erase_dir_entry_input req = {0};
3263         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3264
3265         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
3266         req.dir_idx = rte_cpu_to_le_16(index);
3267         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3268         HWRM_CHECK_RESULT();
3269         HWRM_UNLOCK();
3270
3271         return rc;
3272 }
3273
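/*
 * NVM write path: the payload is staged in a DMA-able buffer whose IOVA
 * is handed to the firmware in host_src_addr.  The buffer is allocated
 * and mapped before the HWRM lock is taken, so every error path can
 * return without leaving the lock held.
 */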
3275 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3276                           uint16_t dir_ordinal, uint16_t dir_ext,
3277                           uint16_t dir_attr, const uint8_t *data,
3278                           size_t data_len)
3279 {
3280         int rc;
3281         struct hwrm_nvm_write_input req = {0};
3282         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3283         rte_iova_t dma_handle;
3284         uint8_t *buf;
3285
        buf = rte_malloc("nvm_write", data_len, 0);
        if (!buf)
                return -ENOMEM;
        rte_mem_lock_page(buf);

        dma_handle = rte_mem_virt2iova(buf);
        if (dma_handle == 0) {
                PMD_DRV_LOG(ERR,
                        "unable to map write data to physical memory\n");
                rte_free(buf);
                return -ENOMEM;
        }
        memcpy(buf, data, data_len);

        HWRM_PREP(req, NVM_WRITE);

        req.dir_type = rte_cpu_to_le_16(dir_type);
        req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
        req.dir_ext = rte_cpu_to_le_16(dir_ext);
        req.dir_attr = rte_cpu_to_le_16(dir_attr);
        req.dir_data_length = rte_cpu_to_le_32(data_len);
        req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3307
3308         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3309
3310         HWRM_CHECK_RESULT();
3311         HWRM_UNLOCK();
3312
3313         rte_free(buf);
3314         return rc;
3315 }
3316
3317 static void
3318 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3319 {
3320         uint32_t *count = cbdata;
3321
3322         *count = *count + 1;
3323 }
3324
3325 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3326                                      struct bnxt_vnic_info *vnic __rte_unused)
3327 {
3328         return 0;
3329 }
3330
3331 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3332 {
3333         uint32_t count = 0;
3334
3335         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3336             &count, bnxt_vnic_count_hwrm_stub);
3337
3338         return count;
3339 }
3340
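/*
 * Retrieve the list of VNIC ids owned by a VF.  The caller supplies a
 * DMA-able table sized for bp->pf.total_vnics entries; the return value
 * is the number of ids written to the table, or -1 on failure.
 */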
3341 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3342                                         uint16_t *vnic_ids)
3343 {
3344         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3345         struct hwrm_func_vf_vnic_ids_query_output *resp =
3346                                                 bp->hwrm_cmd_resp_addr;
3347         int rc;
3348
3349         /* First query all VNIC ids */
3350         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
3351
3352         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3353         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3354         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3355
3356         if (req.vnic_id_tbl_addr == 0) {
3357                 HWRM_UNLOCK();
3358                 PMD_DRV_LOG(ERR,
3359                 "unable to map VNIC ID table address to physical memory\n");
3360                 return -ENOMEM;
3361         }
3362         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3363         if (rc) {
3364                 HWRM_UNLOCK();
3365                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3366                 return -1;
3367         } else if (resp->error_code) {
3368                 rc = rte_le_to_cpu_16(resp->error_code);
3369                 HWRM_UNLOCK();
3370                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
3371                 return -1;
3372         }
3373         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3374
3375         HWRM_UNLOCK();
3376
3377         return rc;
3378 }
3379
/*
 * This function queries the VNIC IDs for a specified VF.  For each VNIC it
 * calls vnic_cb to update the necessary field in vnic_info with cbdata.
 * It then calls the hwrm_cb function to program this new VNIC configuration,
 * e.g. vf_vnic_set_rxmask_cb() plus bnxt_set_rx_mask_no_vlan() when a VF's
 * rx mode is updated.
 */
3385 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3386         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3387         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3388 {
3389         struct bnxt_vnic_info vnic;
3390         int rc = 0;
3391         int i, num_vnic_ids;
3392         uint16_t *vnic_ids;
3393         size_t vnic_id_sz;
3394         size_t sz;
3395
3396         /* First query all VNIC ids */
3397         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3398         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3399                         RTE_CACHE_LINE_SIZE);
3400         if (vnic_ids == NULL) {
3401                 rc = -ENOMEM;
3402                 return rc;
3403         }
3404         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3405                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3406
        num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);

        if (num_vnic_ids < 0) {
                rte_free(vnic_ids);
                return num_vnic_ids;
        }
3411
        /* Retrieve each VNIC, apply vnic_cb to it, then reprogram it via hwrm_cb */
3413
3414         for (i = 0; i < num_vnic_ids; i++) {
3415                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3416                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3417                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3418                 if (rc)
3419                         break;
3420                 if (vnic.mru <= 4)      /* Indicates unallocated */
3421                         continue;
3422
3423                 vnic_cb(&vnic, cbdata);
3424
3425                 rc = hwrm_cb(bp, &vnic);
3426                 if (rc)
3427                         break;
3428         }
3429
3430         rte_free(vnic_ids);
3431
3432         return rc;
3433 }
3434
3435 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3436                                               bool on)
3437 {
3438         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3439         struct hwrm_func_cfg_input req = {0};
3440         int rc;
3441
3442         HWRM_PREP(req, FUNC_CFG);
3443
3444         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3445         req.enables |= rte_cpu_to_le_32(
3446                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3447         req.vlan_antispoof_mode = on ?
3448                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3449                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3450         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3451
3452         HWRM_CHECK_RESULT();
3453         HWRM_UNLOCK();
3454
3455         return rc;
3456 }
3457
3458 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3459 {
3460         struct bnxt_vnic_info vnic;
3461         uint16_t *vnic_ids;
3462         size_t vnic_id_sz;
3463         int num_vnic_ids, i;
3464         size_t sz;
3465         int rc;
3466
3467         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3468         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3469                         RTE_CACHE_LINE_SIZE);
3470         if (vnic_ids == NULL) {
3471                 rc = -ENOMEM;
3472                 return rc;
3473         }
3474
3475         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3476                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3477
3478         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3479         if (rc <= 0)
3480                 goto exit;
3481         num_vnic_ids = rc;
3482
3483         /*
3484          * Loop through to find the default VNIC ID.
3485          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3486          * by sending the hwrm_func_qcfg command to the firmware.
3487          */
3488         for (i = 0; i < num_vnic_ids; i++) {
3489                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3490                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3491                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3492                                         bp->pf.first_vf_id + vf);
3493                 if (rc)
3494                         goto exit;
3495                 if (vnic.func_default) {
3496                         rte_free(vnic_ids);
3497                         return vnic.fw_vnic_id;
3498                 }
3499         }
3500         /* Could not find a default VNIC. */
3501         PMD_DRV_LOG(ERR, "No default VNIC\n");
3502 exit:
3503         rte_free(vnic_ids);
3504         return -1;
3505 }
3506
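/*
 * Exact-match (EM) flow allocation.  Any previously allocated EM filter
 * id is freed first.  Note the byte order: the IP address and L4 port
 * match fields are programmed in network byte order here
 * (rte_cpu_to_be_*()), while the ntuple variant below takes them
 * little-endian.
 */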
3507 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3508                          uint16_t dst_id,
3509                          struct bnxt_filter_info *filter)
3510 {
3511         int rc = 0;
3512         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3513         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3514         uint32_t enables = 0;
3515
3516         if (filter->fw_em_filter_id != UINT64_MAX)
3517                 bnxt_hwrm_clear_em_filter(bp, filter);
3518
3519         HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
3520
3521         req.flags = rte_cpu_to_le_32(filter->flags);
3522
3523         enables = filter->enables |
3524               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3525         req.dst_id = rte_cpu_to_le_16(dst_id);
3526
3527         if (filter->ip_addr_type) {
3528                 req.ip_addr_type = filter->ip_addr_type;
3529                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3530         }
3531         if (enables &
3532             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3533                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3534         if (enables &
3535             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3536                 memcpy(req.src_macaddr, filter->src_macaddr,
3537                        ETHER_ADDR_LEN);
3538         if (enables &
3539             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3540                 memcpy(req.dst_macaddr, filter->dst_macaddr,
3541                        ETHER_ADDR_LEN);
3542         if (enables &
3543             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3544                 req.ovlan_vid = filter->l2_ovlan;
3545         if (enables &
3546             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3547                 req.ivlan_vid = filter->l2_ivlan;
3548         if (enables &
3549             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3550                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3551         if (enables &
3552             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3553                 req.ip_protocol = filter->ip_protocol;
3554         if (enables &
3555             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3556                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3557         if (enables &
3558             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3559                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3560         if (enables &
3561             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3562                 req.src_port = rte_cpu_to_be_16(filter->src_port);
3563         if (enables &
3564             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3565                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3566         if (enables &
3567             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3568                 req.mirror_vnic_id = filter->mirror_vnic_id;
3569
3570         req.enables = rte_cpu_to_le_32(enables);
3571
3572         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3573
3574         HWRM_CHECK_RESULT();
3575
3576         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3577         HWRM_UNLOCK();
3578
3579         return rc;
3580 }
3581
3582 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3583 {
3584         int rc = 0;
3585         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3586         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3587
3588         if (filter->fw_em_filter_id == UINT64_MAX)
3589                 return 0;
3590
        PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
3592         HWRM_PREP(req, CFA_EM_FLOW_FREE);
3593
3594         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3595
3596         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3597
3598         HWRM_CHECK_RESULT();
3599         HWRM_UNLOCK();
3600
3601         filter->fw_em_filter_id = UINT64_MAX;
3602         filter->fw_l2_filter_id = UINT64_MAX;
3603
3604         return 0;
3605 }
3606
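/*
 * N-tuple flow allocation.  As with EM filters, any previously allocated
 * filter id is freed before a new one is requested, and the DST_ID of
 * the receiving VNIC is always enabled in the request.
 */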
3607 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3608                          uint16_t dst_id,
3609                          struct bnxt_filter_info *filter)
3610 {
3611         int rc = 0;
3612         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3613         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3614                                                 bp->hwrm_cmd_resp_addr;
3615         uint32_t enables = 0;
3616
3617         if (filter->fw_ntuple_filter_id != UINT64_MAX)
3618                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3619
3620         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
3621
3622         req.flags = rte_cpu_to_le_32(filter->flags);
3623
3624         enables = filter->enables |
3625               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3626         req.dst_id = rte_cpu_to_le_16(dst_id);
3627
3629         if (filter->ip_addr_type) {
3630                 req.ip_addr_type = filter->ip_addr_type;
3631                 enables |=
3632                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3633         }
3634         if (enables &
3635             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3636                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3637         if (enables &
3638             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3639                 memcpy(req.src_macaddr, filter->src_macaddr,
3640                        ETHER_ADDR_LEN);
3645         if (enables &
3646             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3647                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3648         if (enables &
3649             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3650                 req.ip_protocol = filter->ip_protocol;
3651         if (enables &
3652             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3653                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3654         if (enables &
3655             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3656                 req.src_ipaddr_mask[0] =
3657                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3658         if (enables &
3659             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3660                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
        if (enables &
            HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
                req.dst_ipaddr_mask[0] =
                        rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
3665         if (enables &
3666             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3667                 req.src_port = rte_cpu_to_le_16(filter->src_port);
3668         if (enables &
3669             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3670                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3671         if (enables &
3672             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3673                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3674         if (enables &
3675             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3676                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3677         if (enables &
3678             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3679                 req.mirror_vnic_id = filter->mirror_vnic_id;
3680
3681         req.enables = rte_cpu_to_le_32(enables);
3682
3683         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3684
3685         HWRM_CHECK_RESULT();
3686
3687         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3688         HWRM_UNLOCK();
3689
3690         return rc;
3691 }
3692
3693 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3694                                 struct bnxt_filter_info *filter)
3695 {
3696         int rc = 0;
3697         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3698         struct hwrm_cfa_ntuple_filter_free_output *resp =
3699                                                 bp->hwrm_cmd_resp_addr;
3700
3701         if (filter->fw_ntuple_filter_id == UINT64_MAX)
3702                 return 0;
3703
3704         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
3705
3706         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3707
3708         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3709
3710         HWRM_CHECK_RESULT();
3711         HWRM_UNLOCK();
3712
3713         filter->fw_ntuple_filter_id = UINT64_MAX;
3714         filter->fw_l2_filter_id = UINT64_MAX;
3715
3716         return 0;
3717 }
3718
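/*
 * (Re)build the VNIC's RSS indirection table from its ring group ids,
 * skipping groups still marked INVALID_HW_RING_ID.  The table is only
 * programmed when both rss_table and hash_type are set; if no valid ring
 * group exists the function returns without touching the hardware.
 */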
3719 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3720 {
3721         unsigned int rss_idx, fw_idx, i;
3722
3723         if (vnic->rss_table && vnic->hash_type) {
3724                 /*
3725                  * Fill the RSS hash & redirection table with
3726                  * ring group ids for all VNICs
3727                  */
3728                 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
3729                         rss_idx++, fw_idx++) {
3730                         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3731                                 fw_idx %= bp->rx_cp_nr_rings;
3732                                 if (vnic->fw_grp_ids[fw_idx] !=
3733                                     INVALID_HW_RING_ID)
3734                                         break;
3735                                 fw_idx++;
3736                         }
3737                         if (i == bp->rx_cp_nr_rings)
3738                                 return 0;
3739                         vnic->rss_table[rss_idx] =
3740                                 vnic->fw_grp_ids[fw_idx];
3741                 }
3742                 return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
3743         }
3744         return 0;
3745 }