net/bnxt: fix endianness of flag
drivers/net/bnxt/bnxt_hwrm.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT                10000

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}
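
/*
 * For example, page_getenum(4096) is 12 and page_getenum(4097) is 13,
 * so page_roundup(4097) rounds up to 8192.
 */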

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * command is failed by the ChiMP.
 */
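
/*
 * A caller-side sketch of that convention; handle_timeout() and
 * handle_hwrm_error() are hypothetical stand-ins:
 *
 *      rc = bnxt_hwrm_func_reset(bp);
 *      if (rc == -1)
 *              handle_timeout();       (channel send timed out)
 *      else if (rc > 0)
 *              handle_hwrm_error(rc);  (HWRM error code from the ChiMP)
 */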

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                        uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };

        if (bp->flags & BNXT_FLAG_SHORT_CMD) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->max_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                /* Sync memory write before updating doorbell */
                rte_wmb();

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                uint16_t resp_len;

                /* Sanity check on the resp->resp_len */
                rte_rmb();
                resp_len = rte_le_to_cpu_16(resp->resp_len);
                if (resp_len && resp_len <= bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
                        rte_le_to_cpu_16(req->req_type));
                goto err_ret;
        }
        return 0;

err_ret:
        return -1;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands.  It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() checks the send return code and the HWRM error code;
 * on failure it logs the error, releases the spinlock and returns from the
 * calling function.  Because of that early return it may only be used in
 * functions that use the regular int return codes; otherwise it should be
 * copied and modified to suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(-1); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT() do { \
        if (rc) { \
                PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (rte_le_to_cpu_16(resp->resp_len) >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        PMD_DRV_LOG(ERR, \
                                "error %d:%d:%08x:%04x\n", \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32( \
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16( \
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        PMD_DRV_LOG(ERR, "error %d\n", rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)
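
/*
 * Canonical usage of the three macros, mirroring the functions below
 * (sketch based on bnxt_hwrm_func_reset()):
 *
 *      struct hwrm_func_reset_input req = {.req_type = 0 };
 *      struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *      HWRM_PREP(req, FUNC_RESET);
 *      req.enables = rte_cpu_to_le_32(0);
 *      rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *      HWRM_CHECK_RESULT();
 *      ... read resp fields while the spinlock is still held ...
 *      HWRM_UNLOCK();
 *      return rc;
 */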

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME: add the multicast flag once multicast add options are
         * supported by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->flags & BNXT_VNIC_INFO_MCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                         rte_mem_virt2iova(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command; the set_rx_mask
         * list was used for anti-spoofing instead.  In 1.8.0 the TX path
         * configuration was removed from the set_rx_mask call, and this
         * command was added.
         *
         * This command is also present in 1.7.8.11 and higher, as well as
         * in 1.7.8.0.
         */
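        /*
         * bnxt_hwrm_ver_get() packs the firmware version as
         * (maj << 24) | (min << 16) | (bld << 8) | rsvd, so in the checks
         * below 1.8.0 is (1 << 24) | (8 << 16) and 1.7.8.11 is
         * (1 << 24) | (7 << 16) | (8 << 8) | 11.
         */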
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
        HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = -1;

        return 0;
}

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        uint32_t enables = 0;
        uint16_t j = dst_id - 1;

        /* TODO: Is there a better way to add VLANs to each VNIC
         * in case of VMDq?
         */
        if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
            conf->pool_map[j].pools & (1UL << j)) {
                PMD_DRV_LOG(DEBUG,
                        "Add vlan %u to vmdq pool %u\n",
                        conf->pool_map[j].vlan_id, j);

                filter->l2_ivlan = conf->pool_map[j].vlan_id;
                filter->enables |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
        }

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_l2_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
                req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
                req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
        struct hwrm_port_mac_cfg_input req = {.req_type = 0};
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
        uint32_t flags = 0;
        int rc;

        if (!ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_CFG);

        if (ptp->rx_filter)
                flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
        else
                flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
        if (ptp->tx_tstamp_en)
                flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
        else
                flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
        req.flags = rte_cpu_to_le_32(flags);
        req.enables = rte_cpu_to_le_32(
                PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
        req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
        struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*      if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
        if (ptp)
                return 0;

        HWRM_PREP(req, PORT_MAC_PTP_QCFG);

        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS)) {
                HWRM_UNLOCK();
                return 0;
        }

        ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
        if (!ptp) {
                HWRM_UNLOCK();
                return -ENOMEM;
        }

        ptp->rx_regs[BNXT_PTP_RX_TS_L] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
        ptp->rx_regs[BNXT_PTP_RX_TS_H] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
        ptp->rx_regs[BNXT_PTP_RX_SEQ] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
        ptp->rx_regs[BNXT_PTP_RX_FIFO] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
        ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
                rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
        ptp->tx_regs[BNXT_PTP_TX_TS_L] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
        ptp->tx_regs[BNXT_PTP_TX_TS_H] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
        ptp->tx_regs[BNXT_PTP_TX_SEQ] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
        ptp->tx_regs[BNXT_PTP_TX_FIFO] =
                rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

        /* Release the lock once all response fields have been read */
        HWRM_UNLOCK();

        ptp->bp = bp;
        bp->ptp_cfg = ptp;

        return 0;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        uint32_t flags;
        int i;

        HWRM_PREP(req, FUNC_QCAPS);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        flags = rte_le_to_cpu_32(resp->flags);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                bp->pf.vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_as_table == NULL)
                                        PMD_DRV_LOG(ERR,
                                        "Failed to alloc VLAN AS table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                               bp->pf.vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp)) {
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
                if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
                        bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
                        PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
                }
        }

        HWRM_UNLOCK();

        /* Issue the PTP query only after the QCAPS lock has been dropped */
        if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
                bnxt_hwrm_ptp_qcfg(bp);

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));
        }

        req.async_event_fwd[0] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
                                 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
                                 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
        req.async_event_fwd[1] |=
                rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
                                 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        HWRM_PREP(req, VER_GET);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj, resp->hwrm_intf_min,
                resp->hwrm_intf_upd,
                resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
        bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
                        (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
        PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj << 16;
        fw_version |= resp->hwrm_intf_min << 8;
        fw_version |= resp->hwrm_intf_upd;

        if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
                PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        PMD_DRV_LOG(INFO,
                                "Firmware API version is newer than driver.\n");
                        PMD_DRV_LOG(INFO,
                                "The driver may be missing features.\n");
                } else {
                        PMD_DRV_LOG(INFO,
                                "Firmware API version is older than driver.\n");
                        PMD_DRV_LOG(INFO,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > rte_le_to_cpu_16(resp->max_req_win_len)) {
                PMD_DRV_LOG(ERR, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        PMD_DRV_LOG(ERR,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                PMD_DRV_LOG(DEBUG, "Short command supported\n");

                /* "type" is only initialized above when the response buffer
                 * is reallocated, so build the allocation name here too.
                 */
                sprintf(type, "bnxt_hwrm_short_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr = rte_malloc(type,
                                                        bp->max_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == 0) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        PMD_DRV_LOG(ERR,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }

                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

error:
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR);
        req.flags = rte_cpu_to_le_32(flags);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG);

        if (conf->link_up) {
                /* Setting a fixed speed while autoneg is on, so disable it */
                if (bp->link_info.auto_mode && conf->link_speed) {
                        req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                        PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
                }

                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        /* No speeds specified. Enable AutoNeg - all speeds */
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                }
                /* AutoNeg - Advertise speeds specified. */
                if (conf->auto_link_speed_mask &&
                    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                        req.auto_link_speed_mask =
                                rte_cpu_to_le_16(conf->auto_link_speed_mask);
                        enables |=
                        HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                }

                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                PMD_DRV_LOG(INFO, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex_cfg;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
        link_info->phy_type = resp->phy_type;
        link_info->media_type = resp->media_type;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        HWRM_UNLOCK();

        PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
        PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
        PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
                    link_info->auto_link_speed_mask);
        PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
                    link_info->force_link_speed);

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, QUEUE_QPORTCFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
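        /*
         * Token pasting expands, e.g., GET_QUEUE_INFO(0) to:
         *      bp->cos_queue[0].id = resp->queue_id0;
         *      bp->cos_queue[0].profile = resp->queue_id0_service_profile;
         */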

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
        int rc = 0;
        uint32_t enables = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);
        req.length = rte_cpu_to_le_32(ring->ring_size);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = bp->cos_queue[0].id;
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                break;
        default:
                PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                HWRM_UNLOCK();
                return -1;
        }
        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        PMD_DRV_LOG(ERR,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                default:
                        PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                HWRM_UNLOCK();

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        HWRM_UNLOCK();
        return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC);

        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        HWRM_PREP(req, STAT_CTX_CLR_STATS);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC);

        req.update_period_ms = rte_cpu_to_le_32(0);

        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
                vnic->start_grp_id, vnic->end_grp_id);
        for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
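        /*
         * The MRU is the MTU plus L2 overhead; e.g. with a 1500-byte MTU:
         * 1500 + 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN) + 4 (VLAN_TAG_SIZE)
         * = 1522 bytes.
         */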
        vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                ETHER_CRC_LEN + VLAN_TAG_SIZE;
        HWRM_PREP(req, VNIC_ALLOC);

        if (vnic->func_default)
                req.flags =
                        rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        HWRM_UNLOCK();
        PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
                                        struct bnxt_vnic_info *vnic,
                                        struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_QCFG);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        pmode->flags = rte_le_to_cpu_32(resp->flags);
        /* dflt_vnic bit doesn't exist in the _cfg command */
        pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
        pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
        pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
        pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
                                       struct bnxt_vnic_info *vnic,
                                       struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_CFG);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
        req.flags = rte_cpu_to_le_32(pmode->flags);
        req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
        req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
        req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
        req.enables = rte_cpu_to_le_32(
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
        );

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t ctx_enable_flag = 0;
        struct bnxt_plcmodes_cfg pmodes;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
                return rc;
        }

        rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
        if (rc)
                return rc;

        HWRM_PREP(req, VNIC_CFG);

        /* Only RSS support for now TBD: COS & LB */
        req.enables =
            rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
        if (vnic->lb_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
        if (vnic->cos_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
        if (vnic->rss_rule != 0xffff) {
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
        }
        req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
        req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
        req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
        req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
        req.mru = rte_cpu_to_le_16(vnic->mru);
        if (vnic->func_default)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
        if (vnic->vlan_strip)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
        if (vnic->bd_stall)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
        if (vnic->roce_dual)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
        if (vnic->roce_only)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
        if (vnic->rss_dflt_cr)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

        return rc;
}
1302
1303 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1304                 int16_t fw_vf_id)
1305 {
1306         int rc = 0;
1307         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1308         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1309
1310         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1311                 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1312                 return rc;
1313         }
1314         HWRM_PREP(req, VNIC_QCFG);
1315
1316         req.enables =
1317                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1318         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1319         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1320
1321         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1322
1323         HWRM_CHECK_RESULT();
1324
1325         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1326         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1327         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1328         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1329         vnic->mru = rte_le_to_cpu_16(resp->mru);
1330         vnic->func_default = rte_le_to_cpu_32(
1331                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1332         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1333                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1334         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1335                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1336         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1337                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1338         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1339                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1340         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1341                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1342
1343         HWRM_UNLOCK();
1344
1345         return rc;
1346 }
1347
1348 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1349 {
1350         int rc = 0;
1351         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1352         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1353                                                 bp->hwrm_cmd_resp_addr;
1354
1355         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
1356
1357         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1358
1359         HWRM_CHECK_RESULT();
1360
1361         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1362         HWRM_UNLOCK();
1363         PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1364
1365         return rc;
1366 }
1367
1368 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1369 {
1370         int rc = 0;
1371         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1372         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1373                                                 bp->hwrm_cmd_resp_addr;
1374
1375         if (vnic->rss_rule == 0xffff) {
1376                 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1377                 return rc;
1378         }
1379         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
1380
1381         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1382
1383         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1384
1385         HWRM_CHECK_RESULT();
1386         HWRM_UNLOCK();
1387
1388         vnic->rss_rule = INVALID_HW_RING_ID;
1389
1390         return rc;
1391 }
1392
1393 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1394 {
1395         int rc = 0;
1396         struct hwrm_vnic_free_input req = {.req_type = 0 };
1397         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1398
1399         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1400                 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1401                 return rc;
1402         }
1403
1404         HWRM_PREP(req, VNIC_FREE);
1405
1406         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1407
1408         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1409
1410         HWRM_CHECK_RESULT();
1411         HWRM_UNLOCK();
1412
1413         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1414         return rc;
1415 }
1416
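/*
 * Program the RSS hash type, ring-group indirection table, and hash key
 * for the VNIC. Both tables are passed to firmware by DMA address, so
 * vnic->rss_table_dma_addr and vnic->rss_hash_key_dma_addr must already
 * point at populated, IOVA-contiguous buffers, and vnic->rss_rule must
 * hold a context obtained from bnxt_hwrm_vnic_ctx_alloc().
 */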
1417 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1418                            struct bnxt_vnic_info *vnic)
1419 {
1420         int rc = 0;
1421         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1422         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1423
1424         HWRM_PREP(req, VNIC_RSS_CFG);
1425
1426         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1427
1428         req.ring_grp_tbl_addr =
1429             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1430         req.hash_key_tbl_addr =
1431             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1432         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1433
1434         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1435
1436         HWRM_CHECK_RESULT();
1437         HWRM_UNLOCK();
1438
1439         return rc;
1440 }
1441
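/*
 * Configure jumbo placement for the VNIC. The threshold is derived from
 * the first RX queue's mempool: data room minus headroom is the largest
 * payload a single buffer can hold. For example, with a 2176-byte data
 * room and the 128-byte RTE_PKTMBUF_HEADROOM (assumed defaults),
 * jumbo_thresh becomes 2048.
 */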
1442 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1443                         struct bnxt_vnic_info *vnic)
1444 {
1445         int rc = 0;
1446         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1447         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1448         uint16_t size;
1449
1450         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1451
1452         req.flags = rte_cpu_to_le_32(
1453                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1454
1455         req.enables = rte_cpu_to_le_32(
1456                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1457
1458         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1459         size -= RTE_PKTMBUF_HEADROOM;
1460
1461         req.jumbo_thresh = rte_cpu_to_le_16(size);
1462         req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1463
1464         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1465
1466         HWRM_CHECK_RESULT();
1467         HWRM_UNLOCK();
1468
1469         return rc;
1470 }
1471
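/*
 * Enable or disable TPA (hardware LRO) on the VNIC. When enabling, the
 * aggregation limits below (5 segments, 512-byte minimum aggregate
 * length) are fixed driver choices, not values negotiated with firmware.
 */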
1472 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1473                         struct bnxt_vnic_info *vnic, bool enable)
1474 {
1475         int rc = 0;
1476         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1477         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1478
1479         HWRM_PREP(req, VNIC_TPA_CFG);
1480
1481         if (enable) {
1482                 req.enables = rte_cpu_to_le_32(
1483                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1484                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1485                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1486                 req.flags = rte_cpu_to_le_32(
1487                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1488                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1489                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1490                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1491                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1492                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1493                 req.max_agg_segs = rte_cpu_to_le_16(5);
1494                 req.max_aggs =
1495                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1496                 req.min_agg_len = rte_cpu_to_le_32(512);
1497         }
1498         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1499
1500         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1501
1502         HWRM_CHECK_RESULT();
1503         HWRM_UNLOCK();
1504
1505         return rc;
1506 }
1507
1508 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1509 {
1510         struct hwrm_func_cfg_input req = {0};
1511         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1512         int rc;
1513
1514         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1515         req.enables = rte_cpu_to_le_32(
1516                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1517         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1518         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1519
1520         HWRM_PREP(req, FUNC_CFG);
1521
1522         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1523         HWRM_CHECK_RESULT();
1524         HWRM_UNLOCK();
1525
1526         bp->pf.vf_info[vf].random_mac = false;
1527
1528         return rc;
1529 }
1530
1531 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1532                                   uint64_t *dropped)
1533 {
1534         int rc = 0;
1535         struct hwrm_func_qstats_input req = {.req_type = 0};
1536         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1537
1538         HWRM_PREP(req, FUNC_QSTATS);
1539
1540         req.fid = rte_cpu_to_le_16(fid);
1541
1542         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1543
1544         HWRM_CHECK_RESULT();
1545
1546         if (dropped)
1547                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1548
1549         HWRM_UNLOCK();
1550
1551         return rc;
1552 }
1553
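/*
 * Fetch per-function counters and fold them into rte_eth_stats:
 * unicast/multicast/broadcast packet and byte counts are summed into
 * the ipackets/ibytes and opackets/obytes totals, RX drops map to
 * imissed, and the error counters map to ierrors/oerrors.
 */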
1554 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1555                           struct rte_eth_stats *stats)
1556 {
1557         int rc = 0;
1558         struct hwrm_func_qstats_input req = {.req_type = 0};
1559         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1560
1561         HWRM_PREP(req, FUNC_QSTATS);
1562
1563         req.fid = rte_cpu_to_le_16(fid);
1564
1565         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1566
1567         HWRM_CHECK_RESULT();
1568
1569         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1570         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1571         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1572         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1573         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1574         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1575
1576         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1577         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1578         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1579         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1580         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1581         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1582
1583         stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
1584         stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
1585
1586         stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
1587
1588         HWRM_UNLOCK();
1589
1590         return rc;
1591 }
1592
1593 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1594 {
1595         int rc = 0;
1596         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1597         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1598
1599         HWRM_PREP(req, FUNC_CLR_STATS);
1600
1601         req.fid = rte_cpu_to_le_16(fid);
1602
1603         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1604
1605         HWRM_CHECK_RESULT();
1606         HWRM_UNLOCK();
1607
1608         return rc;
1609 }
1610
1611 /*
1612  * HWRM utility functions
1613  */
1614
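/*
 * The stat-context helpers below walk one flat index over both ring
 * sets: i in [0, rx_cp_nr_rings) selects RX completion ring i, and any
 * larger i selects TX completion ring (i - rx_cp_nr_rings).
 */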
1615 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1616 {
1617         unsigned int i;
1618         int rc = 0;
1619
1620         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1621                 struct bnxt_tx_queue *txq;
1622                 struct bnxt_rx_queue *rxq;
1623                 struct bnxt_cp_ring_info *cpr;
1624
1625                 if (i >= bp->rx_cp_nr_rings) {
1626                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1627                         cpr = txq->cp_ring;
1628                 } else {
1629                         rxq = bp->rx_queues[i];
1630                         cpr = rxq->cp_ring;
1631                 }
1632
1633                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1634                 if (rc)
1635                         return rc;
1636         }
1637         return 0;
1638 }
1639
1640 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1641 {
1642         int rc;
1643         unsigned int i;
1644         struct bnxt_cp_ring_info *cpr;
1645
1646         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1647
1648                 if (i >= bp->rx_cp_nr_rings) {
1649                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1650                 } else {
1651                         cpr = bp->rx_queues[i]->cp_ring;
1652                         bp->grp_info[i].fw_stats_ctx = -1;
1653                 }
1654                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1655                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1656                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1657                         if (rc)
1658                                 return rc;
1659                 }
1660         }
1661         return 0;
1662 }
1663
1664 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1665 {
1666         unsigned int i;
1667         int rc = 0;
1668
1669         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1670                 struct bnxt_tx_queue *txq;
1671                 struct bnxt_rx_queue *rxq;
1672                 struct bnxt_cp_ring_info *cpr;
1673
1674                 if (i >= bp->rx_cp_nr_rings) {
1675                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1676                         cpr = txq->cp_ring;
1677                 } else {
1678                         rxq = bp->rx_queues[i];
1679                         cpr = rxq->cp_ring;
1680                 }
1681
1682                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1683
1684                 if (rc)
1685                         return rc;
1686         }
1687         return rc;
1688 }
1689
1690 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1691 {
1692         uint16_t idx;
1693         int rc = 0;
1694
1695         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1696
1697                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1698                         continue;
1699
1700                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1701
1702                 if (rc)
1703                         return rc;
1704         }
1705         return rc;
1706 }
1707
1708 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1709                                 unsigned int idx __rte_unused)
1710 {
1711         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1712
1713         bnxt_hwrm_ring_free(bp, cp_ring,
1714                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1715         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1716         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1717                         sizeof(*cpr->cp_desc_ring));
1718         cpr->cp_raw_cons = 0;
1719 }
1720
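/*
 * Free every firmware ring: TX rings first, then RX and aggregation
 * rings, then the default completion ring. The completion-ring index
 * convention mirrors allocation: 0 is the default ring, RX ring i uses
 * index i + 1, and TX ring i uses index rx_cp_nr_rings + i + 1.
 */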
1721 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1722 {
1723         unsigned int i;
1724         int rc = 0;
1725
1726         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1727                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1728                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1729                 struct bnxt_ring *ring = txr->tx_ring_struct;
1730                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1731                 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
1732
1733                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1734                         bnxt_hwrm_ring_free(bp, ring,
1735                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1736                         ring->fw_ring_id = INVALID_HW_RING_ID;
1737                         memset(txr->tx_desc_ring, 0,
1738                                         txr->tx_ring_struct->ring_size *
1739                                         sizeof(*txr->tx_desc_ring));
1740                         memset(txr->tx_buf_ring, 0,
1741                                         txr->tx_ring_struct->ring_size *
1742                                         sizeof(*txr->tx_buf_ring));
1743                         txr->tx_prod = 0;
1744                         txr->tx_cons = 0;
1745                 }
1746                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1747                         bnxt_free_cp_ring(bp, cpr, idx);
1748                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1749                 }
1750         }
1751
1752         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1753                 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1754                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1755                 struct bnxt_ring *ring = rxr->rx_ring_struct;
1756                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1757                 unsigned int idx = i + 1;
1758
1759                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1760                         bnxt_hwrm_ring_free(bp, ring,
1761                                         HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1762                         ring->fw_ring_id = INVALID_HW_RING_ID;
1763                         bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1764                         memset(rxr->rx_desc_ring, 0,
1765                                         rxr->rx_ring_struct->ring_size *
1766                                         sizeof(*rxr->rx_desc_ring));
1767                         memset(rxr->rx_buf_ring, 0,
1768                                         rxr->rx_ring_struct->ring_size *
1769                                         sizeof(*rxr->rx_buf_ring));
1770                         rxr->rx_prod = 0;
1771                 }
1772                 ring = rxr->ag_ring_struct;
1773                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1774                         bnxt_hwrm_ring_free(bp, ring,
1775                                             HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1776                         ring->fw_ring_id = INVALID_HW_RING_ID;
1777                         memset(rxr->ag_buf_ring, 0,
1778                                rxr->ag_ring_struct->ring_size *
1779                                sizeof(*rxr->ag_buf_ring));
1780                         rxr->ag_prod = 0;
1781                         bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
1782                 }
1783                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1784                         bnxt_free_cp_ring(bp, cpr, idx);
1785                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1786                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1787                 }
1788         }
1789
1790         /* Default completion ring */
1791         {
1792                 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1793
1794                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1795                         bnxt_free_cp_ring(bp, cpr, 0);
1796                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1797                 }
1798         }
1799
1800         return rc;
1801 }
1802
1803 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1804 {
1805         uint16_t i;
1806         int rc = 0;
1807
1808         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1809                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1810                 if (rc)
1811                         return rc;
1812         }
1813         return rc;
1814 }
1815
1816 void bnxt_free_hwrm_resources(struct bnxt *bp)
1817 {
1818         /* Release memzone */
1819         rte_free(bp->hwrm_cmd_resp_addr);
1820         rte_free(bp->hwrm_short_cmd_req_addr);
1821         bp->hwrm_cmd_resp_addr = NULL;
1822         bp->hwrm_short_cmd_req_addr = NULL;
1823         bp->hwrm_cmd_resp_dma_addr = 0;
1824         bp->hwrm_short_cmd_req_dma_addr = 0;
1825 }
1826
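/*
 * Allocate the DMA-able response buffer shared by all HWRM commands.
 * The page is locked and translated with rte_mem_virt2iova(); a zero
 * IOVA is treated as fatal since firmware must be able to DMA replies
 * into it. Pairs with bnxt_free_hwrm_resources() above. A minimal init
 * sketch (assumed ordering, error checks elided):
 *
 *     rc = bnxt_alloc_hwrm_resources(bp);
 *     ...issue HWRM commands...
 *     bnxt_free_hwrm_resources(bp);
 */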
1827 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1828 {
1829         struct rte_pci_device *pdev = bp->pdev;
1830         char type[RTE_MEMZONE_NAMESIZE];
1831
1832         sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1833                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1834         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1835         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1836         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1837         if (bp->hwrm_cmd_resp_addr == NULL)
1838                 return -ENOMEM;
1839         bp->hwrm_cmd_resp_dma_addr =
1840                 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
1841         if (bp->hwrm_cmd_resp_dma_addr == 0) {
1842                 PMD_DRV_LOG(ERR,
1843                         "unable to map response address to physical memory\n");
1844                 return -ENOMEM;
1845         }
1846         rte_spinlock_init(&bp->hwrm_lock);
1847
1848         return 0;
1849 }
1850
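/*
 * Clear every filter attached to the VNIC, dispatching on filter type:
 * exact-match and n-tuple filters have dedicated teardown commands,
 * anything else is treated as an L2 filter.
 */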
1851 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1852 {
1853         struct bnxt_filter_info *filter;
1854         int rc = 0;
1855
1856         STAILQ_FOREACH(filter, &vnic->filter, next) {
1857                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1858                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1859                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1860                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1861                 else
1862                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1863                 /* Continue even if a clear fails so the
1864                  * remaining filters are still released. */
1865         }
1866         return rc;
1867 }
1868
1869 static int
1870 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1871 {
1872         struct bnxt_filter_info *filter;
1873         struct rte_flow *flow;
1874         int rc = 0;
1875
1876         STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1877                 filter = flow->filter;
1878                 PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
1879                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1880                         rc = bnxt_hwrm_clear_em_filter(bp, filter);
1881                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1882                         rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1883                 else
1884                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1885
1886                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
1887                 rte_free(flow);
1888                 /* Continue even if a clear fails so the
1889                  * remaining flows are still released. */
1890         }
1891         return rc;
1892 }
1893
1894 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1895 {
1896         struct bnxt_filter_info *filter;
1897         int rc = 0;
1898
1899         STAILQ_FOREACH(filter, &vnic->filter, next) {
1900                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1901                         rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
1902                                                      filter);
1903                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1904                         rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
1905                                                          filter);
1906                 else
1907                         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
1908                                                      filter);
1909                 if (rc)
1910                         break;
1911         }
1912         return rc;
1913 }
1914
1915 void bnxt_free_tunnel_ports(struct bnxt *bp)
1916 {
1917         if (bp->vxlan_port_cnt)
1918                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1919                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1920         bp->vxlan_port = 0;
1921         if (bp->geneve_port_cnt)
1922                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1923                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1924         bp->geneve_port = 0;
1925 }
1926
1927 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1928 {
1929         int i;
1930
1931         if (bp->vnic_info == NULL)
1932                 return;
1933
1934         /*
1935          * Cleanup VNICs in reverse order, to make sure the L2 filter
1936          * from vnic0 is last to be cleaned up.
1937          */
1938         for (i = bp->nr_vnics - 1; i >= 0; i--) {
1939                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1940
1941                 bnxt_clear_hwrm_vnic_flows(bp, vnic);
1942
1943                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1944
1945                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1946
1947                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1948
1949                 bnxt_hwrm_vnic_free(bp, vnic);
1950         }
1951         /* Ring resources */
1952         bnxt_free_all_hwrm_rings(bp);
1953         bnxt_free_all_hwrm_ring_grps(bp);
1954         bnxt_free_all_hwrm_stat_ctxs(bp);
1955         bnxt_free_tunnel_ports(bp);
1956 }
1957
1958 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1959 {
1960         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1961
1962         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1963                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1964
1965         switch (conf_link_speed) {
1966         case ETH_LINK_SPEED_10M_HD:
1967         case ETH_LINK_SPEED_100M_HD:
1968                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1969         }
1970         return hw_link_duplex;
1971 }
1972
1973 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
1974 {
1975         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
1976 }
1977
1978 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1979 {
1980         uint16_t eth_link_speed = 0;
1981
1982         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1983                 return ETH_LINK_SPEED_AUTONEG;
1984
1985         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1986         case ETH_LINK_SPEED_100M:
1987         case ETH_LINK_SPEED_100M_HD:
1988                 eth_link_speed =
1989                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
1990                 break;
1991         case ETH_LINK_SPEED_1G:
1992                 eth_link_speed =
1993                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
1994                 break;
1995         case ETH_LINK_SPEED_2_5G:
1996                 eth_link_speed =
1997                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
1998                 break;
1999         case ETH_LINK_SPEED_10G:
2000                 eth_link_speed =
2001                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2002                 break;
2003         case ETH_LINK_SPEED_20G:
2004                 eth_link_speed =
2005                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2006                 break;
2007         case ETH_LINK_SPEED_25G:
2008                 eth_link_speed =
2009                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2010                 break;
2011         case ETH_LINK_SPEED_40G:
2012                 eth_link_speed =
2013                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2014                 break;
2015         case ETH_LINK_SPEED_50G:
2016                 eth_link_speed =
2017                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2018                 break;
2019         case ETH_LINK_SPEED_100G:
2020                 eth_link_speed =
2021                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2022                 break;
2023         default:
2024                 PMD_DRV_LOG(ERR,
2025                         "Unsupported link speed %u; defaulting to AUTO\n",
2026                         conf_link_speed);
2027                 break;
2028         }
2029         return eth_link_speed;
2030 }
2031
2032 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2033                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2034                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2035                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2036
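/*
 * Validate requested link speeds. A fixed-speed request must name
 * exactly one supported speed (the one_speed & (one_speed - 1) test
 * rejects multi-bit masks), while an autoneg request only needs some
 * overlap with BNXT_SUPPORTED_SPEEDS. For example, a hypothetical
 * config of ETH_LINK_SPEED_FIXED | ETH_LINK_SPEED_10G passes, but
 * adding ETH_LINK_SPEED_25G to it would fail with -EINVAL.
 */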
2037 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2038 {
2039         uint32_t one_speed;
2040
2041         if (link_speed == ETH_LINK_SPEED_AUTONEG)
2042                 return 0;
2043
2044         if (link_speed & ETH_LINK_SPEED_FIXED) {
2045                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2046
2047                 if (one_speed & (one_speed - 1)) {
2048                         PMD_DRV_LOG(ERR,
2049                                 "Invalid advertised speeds (%u) for port %u\n",
2050                                 link_speed, port_id);
2051                         return -EINVAL;
2052                 }
2053                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2054                         PMD_DRV_LOG(ERR,
2055                                 "Unsupported advertised speed (%u) for port %u\n",
2056                                 link_speed, port_id);
2057                         return -EINVAL;
2058                 }
2059         } else {
2060                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2061                         PMD_DRV_LOG(ERR,
2062                                 "Unsupported advertised speeds (%u) for port %u\n",
2063                                 link_speed, port_id);
2064                         return -EINVAL;
2065                 }
2066         }
2067         return 0;
2068 }
2069
2070 static uint16_t
2071 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2072 {
2073         uint16_t ret = 0;
2074
2075         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2076                 if (bp->link_info.support_speeds)
2077                         return bp->link_info.support_speeds;
2078                 link_speed = BNXT_SUPPORTED_SPEEDS;
2079         }
2080
2081         if (link_speed & ETH_LINK_SPEED_100M)
2082                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2083         if (link_speed & ETH_LINK_SPEED_100M_HD)
2084                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2085         if (link_speed & ETH_LINK_SPEED_1G)
2086                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2087         if (link_speed & ETH_LINK_SPEED_2_5G)
2088                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2089         if (link_speed & ETH_LINK_SPEED_10G)
2090                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2091         if (link_speed & ETH_LINK_SPEED_20G)
2092                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2093         if (link_speed & ETH_LINK_SPEED_25G)
2094                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2095         if (link_speed & ETH_LINK_SPEED_40G)
2096                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2097         if (link_speed & ETH_LINK_SPEED_50G)
2098                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2099         if (link_speed & ETH_LINK_SPEED_100G)
2100                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2101         return ret;
2102 }
2103
2104 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2105 {
2106         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2107
2108         switch (hw_link_speed) {
2109         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2110                 eth_link_speed = ETH_SPEED_NUM_100M;
2111                 break;
2112         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2113                 eth_link_speed = ETH_SPEED_NUM_1G;
2114                 break;
2115         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2116                 eth_link_speed = ETH_SPEED_NUM_2_5G;
2117                 break;
2118         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2119                 eth_link_speed = ETH_SPEED_NUM_10G;
2120                 break;
2121         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2122                 eth_link_speed = ETH_SPEED_NUM_20G;
2123                 break;
2124         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2125                 eth_link_speed = ETH_SPEED_NUM_25G;
2126                 break;
2127         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2128                 eth_link_speed = ETH_SPEED_NUM_40G;
2129                 break;
2130         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2131                 eth_link_speed = ETH_SPEED_NUM_50G;
2132                 break;
2133         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2134                 eth_link_speed = ETH_SPEED_NUM_100G;
2135                 break;
2136         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2137         default:
2138                 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2139                         hw_link_speed);
2140                 break;
2141         }
2142         return eth_link_speed;
2143 }
2144
2145 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2146 {
2147         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2148
2149         switch (hw_link_duplex) {
2150         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2151         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2152                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2153                 break;
2154         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2155                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2156                 break;
2157         default:
2158                 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2159                         hw_link_duplex);
2160                 break;
2161         }
2162         return eth_link_duplex;
2163 }
2164
2165 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2166 {
2167         int rc = 0;
2168         struct bnxt_link_info *link_info = &bp->link_info;
2169
2170         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2171         if (rc) {
2172                 PMD_DRV_LOG(ERR,
2173                         "Get link config failed with rc %d\n", rc);
2174                 goto exit;
2175         }
2176         if (link_info->link_speed)
2177                 link->link_speed =
2178                         bnxt_parse_hw_link_speed(link_info->link_speed);
2179         else
2180                 link->link_speed = ETH_SPEED_NUM_NONE;
2181         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2182         link->link_status = link_info->link_up;
2183         link->link_autoneg = link_info->auto_mode ==
2184                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2185                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2186 exit:
2187         return rc;
2188 }
2189
2190 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2191 {
2192         int rc = 0;
2193         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2194         struct bnxt_link_info link_req;
2195         uint16_t speed, autoneg;
2196
2197         if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2198                 return 0;
2199
2200         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2201                         bp->eth_dev->data->port_id);
2202         if (rc)
2203                 goto error;
2204
2205         memset(&link_req, 0, sizeof(link_req));
2206         link_req.link_up = link_up;
2207         if (!link_up)
2208                 goto port_phy_cfg;
2209
2210         autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2211         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2212         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2213         /* Autoneg can be done only when the FW allows */
2214         if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2215                                 bp->link_info.force_link_speed)) {
2216                 link_req.phy_flags |=
2217                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2218                 link_req.auto_link_speed_mask =
2219                         bnxt_parse_eth_link_speed_mask(bp,
2220                                                        dev_conf->link_speeds);
2221         } else {
2222                 if (bp->link_info.phy_type ==
2223                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2224                     bp->link_info.phy_type ==
2225                     HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2226                     bp->link_info.media_type ==
2227                     HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2228                         PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2229                         return -EINVAL;
2230                 }
2231
2232                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2233                 /* If user wants a particular speed try that first. */
2234                 if (speed)
2235                         link_req.link_speed = speed;
2236                 else if (bp->link_info.force_link_speed)
2237                         link_req.link_speed = bp->link_info.force_link_speed;
2238                 else
2239                         link_req.link_speed = bp->link_info.auto_link_speed;
2240         }
2241         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2242         link_req.auto_pause = bp->link_info.auto_pause;
2243         link_req.force_pause = bp->link_info.force_pause;
2244
2245 port_phy_cfg:
2246         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2247         if (rc) {
2248                 PMD_DRV_LOG(ERR,
2249                         "Set link config failed with rc %d\n", rc);
2250         }
2251
2252 error:
2253         return rc;
2254 }
2255
2256 /* JIRA 22088 */
2257 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2258 {
2259         struct hwrm_func_qcfg_input req = {0};
2260         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2261         uint16_t flags;
2262         int rc = 0;
2263
2264         HWRM_PREP(req, FUNC_QCFG);
2265         req.fid = rte_cpu_to_le_16(0xffff);
2266
2267         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2268
2269         HWRM_CHECK_RESULT();
2270
2271         /* Hard-coded 0xfff VLAN ID mask */
2272         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2273         flags = rte_le_to_cpu_16(resp->flags);
2274         if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2275                 bp->flags |= BNXT_FLAG_MULTI_HOST;
2276
2277         switch (resp->port_partition_type) {
2278         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2279         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2280         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2281                 bp->port_partition_type = resp->port_partition_type;
2282                 break;
2283         default:
2284                 bp->port_partition_type = 0;
2285                 break;
2286         }
2287
2288         HWRM_UNLOCK();
2289
2290         return rc;
2291 }
2292
2293 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2294                                    struct hwrm_func_qcaps_output *qcaps)
2295 {
2296         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2297         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2298                sizeof(qcaps->mac_address));
2299         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2300         qcaps->max_rx_rings = fcfg->num_rx_rings;
2301         qcaps->max_tx_rings = fcfg->num_tx_rings;
2302         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2303         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2304         qcaps->max_vfs = 0;
2305         qcaps->first_vf_id = 0;
2306         qcaps->max_vnics = fcfg->num_vnics;
2307         qcaps->max_decap_records = 0;
2308         qcaps->max_encap_records = 0;
2309         qcaps->max_tx_wm_flows = 0;
2310         qcaps->max_tx_em_flows = 0;
2311         qcaps->max_rx_wm_flows = 0;
2312         qcaps->max_rx_em_flows = 0;
2313         qcaps->max_flow_id = 0;
2314         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2315         qcaps->max_sp_tx_rings = 0;
2316         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2317 }
2318
2319 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2320 {
2321         struct hwrm_func_cfg_input req = {0};
2322         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2323         int rc;
2324
2325         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2326                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2327                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2328                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2329                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2330                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2331                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2332                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2333                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2334                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2335         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2336         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2337         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2338                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
2339         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2340         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2341         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2342         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2343         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2344         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2345         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2346         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2347         req.fid = rte_cpu_to_le_16(0xffff);
2348
2349         HWRM_PREP(req, FUNC_CFG);
2350
2351         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2352
2353         HWRM_CHECK_RESULT();
2354         HWRM_UNLOCK();
2355
2356         return rc;
2357 }
2358
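/*
 * Build a FUNC_CFG request that splits the PF's resources evenly across
 * the PF plus num_vfs VFs. For example, with max_tx_rings = 16 and
 * num_vfs = 3 (hypothetical numbers), each function is offered
 * 16 / (3 + 1) = 4 TX rings. VNICs stay at 1 per VF since VMDq/RFS is
 * not supported on VFs here.
 */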
2359 static void populate_vf_func_cfg_req(struct bnxt *bp,
2360                                      struct hwrm_func_cfg_input *req,
2361                                      int num_vfs)
2362 {
2363         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2364                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2365                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2366                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2367                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2368                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2369                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2370                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2371                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2372                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2373
2374         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2375                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2376         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2377                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2378         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2379                                                 (num_vfs + 1));
2380         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2381         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2382                                                (num_vfs + 1));
2383         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2384         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2385         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2386         /* TODO: For now, do not support VMDq/RFS on VFs. */
2387         req->num_vnics = rte_cpu_to_le_16(1);
2388         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2389                                                  (num_vfs + 1));
2390 }
2391
2392 static void add_random_mac_if_needed(struct bnxt *bp,
2393                                      struct hwrm_func_cfg_input *cfg_req,
2394                                      int vf)
2395 {
2396         struct ether_addr mac;
2397
2398         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2399                 return;
2400
2401         if (is_zero_ether_addr(&mac)) {
2402                 cfg_req->enables |=
2403                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2404                 eth_random_addr(cfg_req->dflt_mac_addr);
2405                 bp->pf.vf_info[vf].random_mac = true;
2406         } else {
2407                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2408         }
2409 }
2410
2411 static void reserve_resources_from_vf(struct bnxt *bp,
2412                                       struct hwrm_func_cfg_input *cfg_req,
2413                                       int vf)
2414 {
2415         struct hwrm_func_qcaps_input req = {0};
2416         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2417         int rc;
2418
2419         /* Get the actual allocated values now */
2420         HWRM_PREP(req, FUNC_QCAPS);
2421         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2422         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2423
2424         if (rc) {
2425                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2426                 copy_func_cfg_to_qcaps(cfg_req, resp);
2427         } else if (resp->error_code) {
2428                 rc = rte_le_to_cpu_16(resp->error_code);
2429                 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2430                 copy_func_cfg_to_qcaps(cfg_req, resp);
2431         }
2432
2433         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2434         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2435         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2436         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2437         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2438         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2439         /*
2440          * TODO: While not supporting VMDq with VFs, max_vnics is always
2441          * forced to 1 in this case
2442          */
2443         /* bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics); */
2444         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2445
2446         HWRM_UNLOCK();
2447 }
2448
2449 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2450 {
2451         struct hwrm_func_qcfg_input req = {0};
2452         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2453         int rc;
2454
2455         /* Check for zero MAC address */
2456         HWRM_PREP(req, FUNC_QCFG);
2457         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2458         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2459         if (rc) {
2460                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
2461                 rc = -1;
2462         } else if (resp->error_code) {
2463                 rc = rte_le_to_cpu_16(resp->error_code);
2464                 PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
2465                 rc = -1;
2466         } else
2467                 rc = rte_le_to_cpu_16(resp->vlan);
2468
2469         HWRM_UNLOCK();
2470
2471         return rc;
2472 }
2473
2474 static int update_pf_resource_max(struct bnxt *bp)
2475 {
2476         struct hwrm_func_qcfg_input req = {0};
2477         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2478         int rc;
2479
2480         /* And copy the allocated numbers into the pf struct */
2481         HWRM_PREP(req, FUNC_QCFG);
2482         req.fid = rte_cpu_to_le_16(0xffff);
2483         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2484         HWRM_CHECK_RESULT();
2485
2486         /* Only TX ring value reflects actual allocation? TODO */
2487         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2488         bp->pf.evb_mode = resp->evb_mode;
2489
2490         HWRM_UNLOCK();
2491
2492         return rc;
2493 }
2494
2495 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2496 {
2497         int rc;
2498
2499         if (!BNXT_PF(bp)) {
2500                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2501                 return -1;
2502         }
2503
2504         rc = bnxt_hwrm_func_qcaps(bp);
2505         if (rc)
2506                 return rc;
2507
2508         bp->pf.func_cfg_flags &=
2509                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2510                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2511         bp->pf.func_cfg_flags |=
2512                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2513         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2514         return rc;
2515 }
2516
2517 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2518 {
2519         struct hwrm_func_cfg_input req = {0};
2520         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2521         int i;
2522         size_t sz;
2523         int rc = 0;
2524         size_t req_buf_sz;
2525
2526         if (!BNXT_PF(bp)) {
2527                 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2528                 return -1;
2529         }
2530
2531         rc = bnxt_hwrm_func_qcaps(bp);
2532
2533         if (rc)
2534                 return rc;
2535
2536         bp->pf.active_vfs = num_vfs;
2537
2538         /*
2539          * First, configure the PF to only use one TX ring.  This ensures that
2540          * there are enough rings for all VFs.
2541          *
2542          * If we don't do this, when we call func_alloc() later, we will lock
2543          * extra rings to the PF that won't be available during func_cfg() of
2544          * the VFs.
2545          *
2546          * This has been fixed with firmware versions above 20.6.54
2547          */
2548         bp->pf.func_cfg_flags &=
2549                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2550                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2551         bp->pf.func_cfg_flags |=
2552                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2553         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2554         if (rc)
2555                 return rc;
2556
2557         /*
2558          * Now, create and register a buffer to hold forwarded VF requests
2559          */
2560         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2561         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2562                 page_roundup(req_buf_sz));
2563         if (bp->pf.vf_req_buf == NULL) {
2564                 rc = -ENOMEM;
2565                 goto error_free;
2566         }
2567         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2568                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2569         for (i = 0; i < num_vfs; i++)
2570                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2571                                         (i * HWRM_MAX_REQ_LEN);
2572
2573         rc = bnxt_hwrm_func_buf_rgtr(bp);
2574         if (rc)
2575                 goto error_free;
2576
2577         populate_vf_func_cfg_req(bp, &req, num_vfs);
2578
2579         bp->pf.active_vfs = 0;
2580         for (i = 0; i < num_vfs; i++) {
2581                 add_random_mac_if_needed(bp, &req, i);
2582
2583                 HWRM_PREP(req, FUNC_CFG);
2584                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2585                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2586                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2587
2588                 /* Clear enable flag for next pass */
2589                 req.enables &= ~rte_cpu_to_le_32(
2590                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2591
2592                 if (rc || resp->error_code) {
2593                         PMD_DRV_LOG(ERR,
2594                                 "Failed to initialize VF %d\n", i);
2595                         PMD_DRV_LOG(ERR,
2596                                 "Not all VFs available. (%d, %d)\n",
2597                                 rc, resp->error_code);
2598                         HWRM_UNLOCK();
2599                         break;
2600                 }
2601
2602                 HWRM_UNLOCK();
2603
2604                 reserve_resources_from_vf(bp, &req, i);
2605                 bp->pf.active_vfs++;
2606                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2607         }
2608
2609         /*
2610          * Now configure the PF to use "the rest" of the resources.
2611          * STD_TX_RING_MODE is set here even though it limits the TX
2612          * rings, because it allows QoS to function properly. Without
2613          * it, the PF rings would break bandwidth settings.
2614          */
2615         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2616         if (rc)
2617                 goto error_free;
2618
2619         rc = update_pf_resource_max(bp);
2620         if (rc)
2621                 goto error_free;
2622
2623         return rc;
2624
2625 error_free:
2626         bnxt_hwrm_func_buf_unrgtr(bp);
2627         return rc;
2628 }
2629
2630 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2631 {
2632         struct hwrm_func_cfg_input req = {0};
2633         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2634         int rc;
2635
2636         HWRM_PREP(req, FUNC_CFG);
2637
2638         req.fid = rte_cpu_to_le_16(0xffff);
2639         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2640         req.evb_mode = bp->pf.evb_mode;
2641
2642         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2643         HWRM_CHECK_RESULT();
2644         HWRM_UNLOCK();
2645
2646         return rc;
2647 }
2648
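/*
 * Register a UDP destination port for VXLAN or GENEVE parsing. The
 * firmware replies with an opaque port identifier that is cached in
 * bp and later required by bnxt_hwrm_tunnel_dst_port_free(); the raw
 * little-endian ID is stored as-is, so the free path converts with
 * rte_cpu_to_le_16() rather than a big-endian swap.
 */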
2649 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2650                                 uint8_t tunnel_type)
2651 {
2652         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2653         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2654         int rc = 0;
2655
2656         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2657         req.tunnel_type = tunnel_type;
2658         req.tunnel_dst_port_val = port;
2659         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2660         HWRM_CHECK_RESULT();
2661
2662         switch (tunnel_type) {
2663         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2664                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2665                 bp->vxlan_port = port;
2666                 break;
2667         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2668                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2669                 bp->geneve_port = port;
2670                 break;
2671         default:
2672                 break;
2673         }
2674
2675         HWRM_UNLOCK();
2676
2677         return rc;
2678 }
2679
2680 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2681                                 uint8_t tunnel_type)
2682 {
2683         struct hwrm_tunnel_dst_port_free_input req = {0};
2684         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2685         int rc = 0;
2686
2687         HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2688
2689         req.tunnel_type = tunnel_type;
2690         req.tunnel_dst_port_id = rte_cpu_to_le_16(port);
2691         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2692
2693         HWRM_CHECK_RESULT();
2694         HWRM_UNLOCK();
2695
2696         return rc;
2697 }
2698
2699 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2700                                         uint32_t flags)
2701 {
2702         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2703         struct hwrm_func_cfg_input req = {0};
2704         int rc;
2705
2706         HWRM_PREP(req, FUNC_CFG);
2707
2708         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2709         req.flags = rte_cpu_to_le_32(flags);
2710         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2711
2712         HWRM_CHECK_RESULT();
2713         HWRM_UNLOCK();
2714
2715         return rc;
2716 }
2717
2718 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2719 {
2720         uint32_t *flag = flagp;
2721
2722         vnic->flags = *flag;
2723 }
2724
2725 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2726 {
2727         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2728 }
2729
2730 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
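/*
 * Register the VF-request forwarding buffer with firmware: a single
 * physically contiguous region of HWRM_MAX_REQ_LEN bytes per active
 * VF, allocated in bnxt_hwrm_allocate_vfs() and described here by one
 * page address plus a page-size enum from page_getenum().
 */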
2731 {
2732         int rc = 0;
2733         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2734         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2735
2736         HWRM_PREP(req, FUNC_BUF_RGTR);
2737
2738         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2739         req.req_buf_page_size = rte_cpu_to_le_16(
2740                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2741         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2742         req.req_buf_page_addr[0] =
2743                 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2744         if (req.req_buf_page_addr[0] == 0) {
2745                 PMD_DRV_LOG(ERR, "unable to map buffer to physical memory\n");
2746                 HWRM_UNLOCK();
2747                 return -ENOMEM;
2748         }
2749
2750         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2751
2752         HWRM_CHECK_RESULT();
2753         HWRM_UNLOCK();
2754
2755         return rc;
2756 }
2757
2758 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2759 {
2760         int rc = 0;
2761         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2762         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2763
2764         HWRM_PREP(req, FUNC_BUF_UNRGTR);
2765
2766         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2767
2768         HWRM_CHECK_RESULT();
2769         HWRM_UNLOCK();
2770
2771         return rc;
2772 }
2773
2774 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2775 {
2776         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2777         struct hwrm_func_cfg_input req = {0};
2778         int rc;
2779
2780         HWRM_PREP(req, FUNC_CFG);
2781
2782         req.fid = rte_cpu_to_le_16(0xffff);
2783         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2784         req.enables = rte_cpu_to_le_32(
2785                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2786         req.async_event_cr = rte_cpu_to_le_16(
2787                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2788         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2789
2790         HWRM_CHECK_RESULT();
2791         HWRM_UNLOCK();
2792
2793         return rc;
2794 }
2795
2796 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2797 {
2798         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2799         struct hwrm_func_vf_cfg_input req = {0};
2800         int rc;
2801
2802         HWRM_PREP(req, FUNC_VF_CFG);
2803
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2806         req.async_event_cr = rte_cpu_to_le_16(
2807                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2808         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2809
2810         HWRM_CHECK_RESULT();
2811         HWRM_UNLOCK();
2812
2813         return rc;
2814 }
2815
2816 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2817 {
2818         struct hwrm_func_cfg_input req = {0};
2819         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2820         uint16_t dflt_vlan, fid;
2821         uint32_t func_cfg_flags;
2822         int rc = 0;
2823
2824         HWRM_PREP(req, FUNC_CFG);
2825
2826         if (is_vf) {
2827                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2828                 fid = bp->pf.vf_info[vf].fid;
2829                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2830         } else {
		fid = 0xffff;	/* rte_cpu_to_le_16() is applied once, below */
2832                 func_cfg_flags = bp->pf.func_cfg_flags;
2833                 dflt_vlan = bp->vlan;
2834         }
2835
2836         req.flags = rte_cpu_to_le_32(func_cfg_flags);
2837         req.fid = rte_cpu_to_le_16(fid);
2838         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2839         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2840
2841         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2842
2843         HWRM_CHECK_RESULT();
2844         HWRM_UNLOCK();
2845
2846         return rc;
2847 }
2848
2849 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2850                         uint16_t max_bw, uint16_t enables)
2851 {
2852         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2853         struct hwrm_func_cfg_input req = {0};
2854         int rc;
2855
2856         HWRM_PREP(req, FUNC_CFG);
2857
2858         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2859         req.enables |= rte_cpu_to_le_32(enables);
2860         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2861         req.max_bw = rte_cpu_to_le_32(max_bw);
2862         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2863
2864         HWRM_CHECK_RESULT();
2865         HWRM_UNLOCK();
2866
2867         return rc;
2868 }
2869
2870 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2871 {
2872         struct hwrm_func_cfg_input req = {0};
2873         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2874         int rc = 0;
2875
2876         HWRM_PREP(req, FUNC_CFG);
2877
2878         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2879         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2880         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2881         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2882
2883         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2884
2885         HWRM_CHECK_RESULT();
2886         HWRM_UNLOCK();
2887
2888         return rc;
2889 }
2890
2891 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2892                               void *encaped, size_t ec_size)
2893 {
2894         int rc = 0;
2895         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2896         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2897
2898         if (ec_size > sizeof(req.encap_request))
2899                 return -1;
2900
2901         HWRM_PREP(req, REJECT_FWD_RESP);
2902
2903         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2904         memcpy(req.encap_request, encaped, ec_size);
2905
2906         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2907
2908         HWRM_CHECK_RESULT();
2909         HWRM_UNLOCK();
2910
2911         return rc;
2912 }
2913
2914 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2915                                        struct ether_addr *mac)
2916 {
2917         struct hwrm_func_qcfg_input req = {0};
2918         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2919         int rc;
2920
2921         HWRM_PREP(req, FUNC_QCFG);
2922
2923         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2924         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2925
2926         HWRM_CHECK_RESULT();
2927
2928         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2929
2930         HWRM_UNLOCK();
2931
2932         return rc;
2933 }
2934
2935 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2936                             void *encaped, size_t ec_size)
2937 {
2938         int rc = 0;
2939         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2940         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2941
2942         if (ec_size > sizeof(req.encap_request))
2943                 return -1;
2944
2945         HWRM_PREP(req, EXEC_FWD_RESP);
2946
2947         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2948         memcpy(req.encap_request, encaped, ec_size);
2949
2950         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2951
2952         HWRM_CHECK_RESULT();
2953         HWRM_UNLOCK();
2954
2955         return rc;
2956 }
2957
2958 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2959                          struct rte_eth_stats *stats, uint8_t rx)
2960 {
2961         int rc = 0;
2962         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
2963         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
2964
2965         HWRM_PREP(req, STAT_CTX_QUERY);
2966
2967         req.stat_ctx_id = rte_cpu_to_le_32(cid);
2968
2969         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2970
2971         HWRM_CHECK_RESULT();
2972
2973         if (rx) {
2974                 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2975                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2976                 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2977                 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2978                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2979                 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2980                 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
2981                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
2982         } else {
2983                 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2984                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2985                 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2986                 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2987                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2988                 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2989                 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
	}

2993         HWRM_UNLOCK();
2994
2995         return rc;
2996 }
2997
2998 int bnxt_hwrm_port_qstats(struct bnxt *bp)
2999 {
3000         struct hwrm_port_qstats_input req = {0};
3001         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3002         struct bnxt_pf_info *pf = &bp->pf;
3003         int rc;
3004
3005         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3006                 return 0;
3007
3008         HWRM_PREP(req, PORT_QSTATS);
3009
3010         req.port_id = rte_cpu_to_le_16(pf->port_id);
3011         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3012         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3013         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3014
3015         HWRM_CHECK_RESULT();
3016         HWRM_UNLOCK();
3017
3018         return rc;
3019 }
3020
3021 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3022 {
3023         struct hwrm_port_clr_stats_input req = {0};
3024         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3025         struct bnxt_pf_info *pf = &bp->pf;
3026         int rc;
3027
3028         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3029                 return 0;
3030
3031         HWRM_PREP(req, PORT_CLR_STATS);
3032
3033         req.port_id = rte_cpu_to_le_16(pf->port_id);
3034         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3035
3036         HWRM_CHECK_RESULT();
3037         HWRM_UNLOCK();
3038
3039         return rc;
3040 }
3041
3042 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3043 {
3044         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3045         struct hwrm_port_led_qcaps_input req = {0};
3046         int rc;
3047
3048         if (BNXT_VF(bp))
3049                 return 0;
3050
3051         HWRM_PREP(req, PORT_LED_QCAPS);
	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
3053         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3054
3055         HWRM_CHECK_RESULT();
3056
3057         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3058                 unsigned int i;
3059
3060                 bp->num_leds = resp->num_leds;
3061                 memcpy(bp->leds, &resp->led0_id,
3062                         sizeof(bp->leds[0]) * bp->num_leds);
3063                 for (i = 0; i < bp->num_leds; i++) {
3064                         struct bnxt_led_info *led = &bp->leds[i];
3065
3066                         uint16_t caps = led->led_state_caps;
3067
3068                         if (!led->led_group_id ||
3069                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3070                                 bp->num_leds = 0;
3071                                 break;
3072                         }
3073                 }
3074         }
3075
3076         HWRM_UNLOCK();
3077
3078         return rc;
3079 }
3080
3081 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3082 {
3083         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3084         struct hwrm_port_led_cfg_input req = {0};
3085         struct bnxt_led_cfg *led_cfg;
3086         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3087         uint16_t duration = 0;
3088         int rc, i;
3089
3090         if (!bp->num_leds || BNXT_VF(bp))
3091                 return -EOPNOTSUPP;
3092
3093         HWRM_PREP(req, PORT_LED_CFG);
3094
3095         if (led_on) {
3096                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3097                 duration = rte_cpu_to_le_16(500);
3098         }
	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
3100         req.num_leds = bp->num_leds;
3101         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3102         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3103                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3104                 led_cfg->led_id = bp->leds[i].led_id;
3105                 led_cfg->led_state = led_state;
3106                 led_cfg->led_blink_on = duration;
3107                 led_cfg->led_blink_off = duration;
3108                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3109         }
3110
3111         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3112
3113         HWRM_CHECK_RESULT();
3114         HWRM_UNLOCK();
3115
3116         return rc;
3117 }
3118
3119 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3120                                uint32_t *length)
3121 {
3122         int rc;
3123         struct hwrm_nvm_get_dir_info_input req = {0};
3124         struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3125
3126         HWRM_PREP(req, NVM_GET_DIR_INFO);
3127
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	/* Read the response before HWRM_UNLOCK(); the response buffer is
	 * shared and is reused by the next HWRM command.
	 */
	*entries = rte_le_to_cpu_32(resp->entries);
	*length = rte_le_to_cpu_32(resp->entry_length);

	HWRM_UNLOCK();

	return rc;
3138 }
3139
3140 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3141 {
3142         int rc;
3143         uint32_t dir_entries;
3144         uint32_t entry_length;
3145         uint8_t *buf;
3146         size_t buflen;
3147         rte_iova_t dma_handle;
3148         struct hwrm_nvm_get_dir_entries_input req = {0};
3149         struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3150
3151         rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3152         if (rc != 0)
3153                 return rc;
3154
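	/* The first two output bytes hold the directory entry count and the
	 * entry length, each truncated to 8 bits, so the caller must supply
	 * at least two bytes.
	 */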
	if (len < 2)
		return -EINVAL;

	*data++ = dir_entries;
	*data++ = entry_length;
	len -= 2;
	memset(data, 0xff, len);
3159
3160         buflen = dir_entries * entry_length;
	buf = rte_malloc("nvm_dir", buflen, 0);
	if (buf == NULL)
		return -ENOMEM;
	rte_mem_lock_page(buf);
3165         dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		rte_free(buf);
		return -ENOMEM;
	}
3171         HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
3172         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3173         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3174
3175         HWRM_CHECK_RESULT();
3176         HWRM_UNLOCK();
3177
3178         if (rc == 0)
3179                 memcpy(data, buf, len > buflen ? buflen : len);
3180
3181         rte_free(buf);
3182
3183         return rc;
3184 }
3185
3186 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3187                              uint32_t offset, uint32_t length,
3188                              uint8_t *data)
3189 {
3190         int rc;
3191         uint8_t *buf;
3192         rte_iova_t dma_handle;
3193         struct hwrm_nvm_read_input req = {0};
3194         struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3195
	buf = rte_malloc("nvm_item", length, 0);
	if (!buf)
		return -ENOMEM;
	rte_mem_lock_page(buf);
3200
3201         dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		rte_free(buf);
		return -ENOMEM;
	}
3207         HWRM_PREP(req, NVM_READ);
3208         req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3209         req.dir_idx = rte_cpu_to_le_16(index);
3210         req.offset = rte_cpu_to_le_32(offset);
3211         req.len = rte_cpu_to_le_32(length);
3212         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3213         HWRM_CHECK_RESULT();
3214         HWRM_UNLOCK();
3215         if (rc == 0)
3216                 memcpy(data, buf, length);
3217
3218         rte_free(buf);
3219         return rc;
3220 }
3221
3222 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3223 {
3224         int rc;
3225         struct hwrm_nvm_erase_dir_entry_input req = {0};
3226         struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3227
3228         HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
3229         req.dir_idx = rte_cpu_to_le_16(index);
3230         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3231         HWRM_CHECK_RESULT();
3232         HWRM_UNLOCK();
3233
3234         return rc;
}

3238 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3239                           uint16_t dir_ordinal, uint16_t dir_ext,
3240                           uint16_t dir_attr, const uint8_t *data,
3241                           size_t data_len)
3242 {
3243         int rc;
3244         struct hwrm_nvm_write_input req = {0};
3245         struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3246         rte_iova_t dma_handle;
3247         uint8_t *buf;
3248
	/* Allocate and map the source buffer before HWRM_PREP() so that the
	 * error paths below do not return with the hwrm_lock held.
	 */
	buf = rte_malloc("nvm_write", data_len, 0);
	if (!buf)
		return -ENOMEM;
	rte_mem_lock_page(buf);

	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		PMD_DRV_LOG(ERR,
			"unable to map buffer address to physical memory\n");
		rte_free(buf);
		return -ENOMEM;
	}
	memcpy(buf, data, data_len);

	HWRM_PREP(req, NVM_WRITE);

	req.dir_type = rte_cpu_to_le_16(dir_type);
	req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
	req.dir_ext = rte_cpu_to_le_16(dir_ext);
	req.dir_attr = rte_cpu_to_le_16(dir_attr);
	req.dir_data_length = rte_cpu_to_le_32(data_len);
	req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3270
3271         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3272
3273         HWRM_CHECK_RESULT();
3274         HWRM_UNLOCK();
3275
3276         rte_free(buf);
3277         return rc;
3278 }
3279
3280 static void
3281 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3282 {
3283         uint32_t *count = cbdata;
3284
3285         *count = *count + 1;
3286 }
3287
3288 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3289                                      struct bnxt_vnic_info *vnic __rte_unused)
3290 {
3291         return 0;
3292 }
3293
3294 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3295 {
3296         uint32_t count = 0;
3297
3298         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3299             &count, bnxt_vnic_count_hwrm_stub);
3300
3301         return count;
3302 }
3303
3304 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3305                                         uint16_t *vnic_ids)
3306 {
3307         struct hwrm_func_vf_vnic_ids_query_input req = {0};
3308         struct hwrm_func_vf_vnic_ids_query_output *resp =
3309                                                 bp->hwrm_cmd_resp_addr;
3310         int rc;
3311
3312         /* First query all VNIC ids */
3313         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
3314
3315         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3316         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3317         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3318
3319         if (req.vnic_id_tbl_addr == 0) {
3320                 HWRM_UNLOCK();
3321                 PMD_DRV_LOG(ERR,
3322                 "unable to map VNIC ID table address to physical memory\n");
3323                 return -ENOMEM;
3324         }
3325         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3326         if (rc) {
3327                 HWRM_UNLOCK();
3328                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3329                 return -1;
3330         } else if (resp->error_code) {
3331                 rc = rte_le_to_cpu_16(resp->error_code);
3332                 HWRM_UNLOCK();
3333                 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
3334                 return -1;
3335         }
3336         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3337
3338         HWRM_UNLOCK();
3339
3340         return rc;
3341 }
3342
/*
 * This function queries the VNIC IDs for a specified VF. It then invokes
 * vnic_cb on each VNIC to update the relevant vnic_info field using cbdata,
 * and finally calls hwrm_cb to program the new VNIC configuration.
 */
3348 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3349         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3350         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3351 {
3352         struct bnxt_vnic_info vnic;
3353         int rc = 0;
3354         int i, num_vnic_ids;
3355         uint16_t *vnic_ids;
3356         size_t vnic_id_sz;
3357         size_t sz;
3358
3359         /* First query all VNIC ids */
3360         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3361         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3362                         RTE_CACHE_LINE_SIZE);
3363         if (vnic_ids == NULL) {
3364                 rc = -ENOMEM;
3365                 return rc;
3366         }
3367         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3368                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3369
	num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);

	if (num_vnic_ids < 0) {
		rte_free(vnic_ids);
		return num_vnic_ids;
	}
3374
	/* Retrieve each VNIC, apply the callback, then program the result */
3376
3377         for (i = 0; i < num_vnic_ids; i++) {
3378                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3379                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3380                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3381                 if (rc)
3382                         break;
3383                 if (vnic.mru <= 4)      /* Indicates unallocated */
3384                         continue;
3385
3386                 vnic_cb(&vnic, cbdata);
3387
3388                 rc = hwrm_cb(bp, &vnic);
3389                 if (rc)
3390                         break;
3391         }
3392
3393         rte_free(vnic_ids);
3394
3395         return rc;
3396 }
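
/*
 * Usage sketch (illustrative; 'vnic_flags' is a hypothetical local): the
 * callback pair defined earlier in this file can be plugged in to apply a
 * new RX mask to every VNIC of a VF:
 *
 *	uint32_t flag = vnic_flags;
 *
 *	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
 *			vf_vnic_set_rxmask_cb, &flag,
 *			bnxt_set_rx_mask_no_vlan);
 *
 * vf_vnic_set_rxmask_cb() stores *flag into each bnxt_vnic_info, and
 * bnxt_set_rx_mask_no_vlan() programs it via bnxt_hwrm_cfa_l2_set_rx_mask().
 */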
3397
3398 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3399                                               bool on)
3400 {
3401         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3402         struct hwrm_func_cfg_input req = {0};
3403         int rc;
3404
3405         HWRM_PREP(req, FUNC_CFG);
3406
3407         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3408         req.enables |= rte_cpu_to_le_32(
3409                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3410         req.vlan_antispoof_mode = on ?
3411                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3412                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3413         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3414
3415         HWRM_CHECK_RESULT();
3416         HWRM_UNLOCK();
3417
3418         return rc;
3419 }
3420
3421 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3422 {
3423         struct bnxt_vnic_info vnic;
3424         uint16_t *vnic_ids;
3425         size_t vnic_id_sz;
3426         int num_vnic_ids, i;
3427         size_t sz;
3428         int rc;
3429
3430         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3431         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3432                         RTE_CACHE_LINE_SIZE);
3433         if (vnic_ids == NULL) {
3434                 rc = -ENOMEM;
3435                 return rc;
3436         }
3437
3438         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3439                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3440
3441         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3442         if (rc <= 0)
3443                 goto exit;
3444         num_vnic_ids = rc;
3445
3446         /*
3447          * Loop through to find the default VNIC ID.
3448          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3449          * by sending the hwrm_func_qcfg command to the firmware.
3450          */
3451         for (i = 0; i < num_vnic_ids; i++) {
3452                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3453                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3454                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3455                                         bp->pf.first_vf_id + vf);
3456                 if (rc)
3457                         goto exit;
3458                 if (vnic.func_default) {
3459                         rte_free(vnic_ids);
3460                         return vnic.fw_vnic_id;
3461                 }
3462         }
3463         /* Could not find a default VNIC. */
3464         PMD_DRV_LOG(ERR, "No default VNIC\n");
3465 exit:
3466         rte_free(vnic_ids);
3467         return -1;
3468 }
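
/*
 * Sketch of the simpler approach mentioned in the TODO above (untested,
 * and assuming the firmware reports resp->dflt_vnic_id for this FID); it
 * mirrors the structure of bnxt_hwrm_func_qcfg_vf_default_mac():
 *
 *	struct hwrm_func_qcfg_input req = {0};
 *	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, FUNC_QCFG);
 *	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT();
 *	dflt_vnic_id = rte_le_to_cpu_16(resp->dflt_vnic_id);
 *	HWRM_UNLOCK();
 */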
3469
3470 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3471                          uint16_t dst_id,
3472                          struct bnxt_filter_info *filter)
3473 {
3474         int rc = 0;
3475         struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3476         struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3477         uint32_t enables = 0;
3478
3479         if (filter->fw_em_filter_id != UINT64_MAX)
3480                 bnxt_hwrm_clear_em_filter(bp, filter);
3481
3482         HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
3483
3484         req.flags = rte_cpu_to_le_32(filter->flags);
3485
3486         enables = filter->enables |
3487               HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3488         req.dst_id = rte_cpu_to_le_16(dst_id);
3489
3490         if (filter->ip_addr_type) {
3491                 req.ip_addr_type = filter->ip_addr_type;
3492                 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3493         }
3494         if (enables &
3495             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3496                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3497         if (enables &
3498             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3499                 memcpy(req.src_macaddr, filter->src_macaddr,
3500                        ETHER_ADDR_LEN);
3501         if (enables &
3502             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3503                 memcpy(req.dst_macaddr, filter->dst_macaddr,
3504                        ETHER_ADDR_LEN);
3505         if (enables &
3506             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3507                 req.ovlan_vid = filter->l2_ovlan;
3508         if (enables &
3509             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3510                 req.ivlan_vid = filter->l2_ivlan;
3511         if (enables &
3512             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3513                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3514         if (enables &
3515             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3516                 req.ip_protocol = filter->ip_protocol;
3517         if (enables &
3518             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3519                 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3520         if (enables &
3521             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3522                 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3523         if (enables &
3524             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3525                 req.src_port = rte_cpu_to_be_16(filter->src_port);
3526         if (enables &
3527             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3528                 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3529         if (enables &
3530             HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3531                 req.mirror_vnic_id = filter->mirror_vnic_id;
3532
3533         req.enables = rte_cpu_to_le_32(enables);
3534
3535         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3536
3537         HWRM_CHECK_RESULT();
3538
3539         filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3540         HWRM_UNLOCK();
3541
3542         return rc;
3543 }
3544
3545 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3546 {
3547         int rc = 0;
3548         struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3549         struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3550
3551         if (filter->fw_em_filter_id == UINT64_MAX)
3552                 return 0;
3553
	PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
3555         HWRM_PREP(req, CFA_EM_FLOW_FREE);
3556
3557         req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3558
3559         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3560
3561         HWRM_CHECK_RESULT();
3562         HWRM_UNLOCK();
3563
3564         filter->fw_em_filter_id = -1;
3565         filter->fw_l2_filter_id = -1;
3566
3567         return 0;
3568 }
3569
3570 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3571                          uint16_t dst_id,
3572                          struct bnxt_filter_info *filter)
3573 {
3574         int rc = 0;
3575         struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3576         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3577                                                 bp->hwrm_cmd_resp_addr;
3578         uint32_t enables = 0;
3579
3580         if (filter->fw_ntuple_filter_id != UINT64_MAX)
3581                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3582
3583         HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
3584
3585         req.flags = rte_cpu_to_le_32(filter->flags);
3586
3587         enables = filter->enables |
3588               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

3592         if (filter->ip_addr_type) {
3593                 req.ip_addr_type = filter->ip_addr_type;
3594                 enables |=
3595                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3596         }
3597         if (enables &
3598             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3599                 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3600         if (enables &
3601             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3602                 memcpy(req.src_macaddr, filter->src_macaddr,
3603                        ETHER_ADDR_LEN);
	/*
	 * if (enables &
	 *     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
	 *	memcpy(req.dst_macaddr, filter->dst_macaddr,
	 *	       ETHER_ADDR_LEN);
	 */
3608         if (enables &
3609             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3610                 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3611         if (enables &
3612             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3613                 req.ip_protocol = filter->ip_protocol;
3614         if (enables &
3615             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3616                 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3617         if (enables &
3618             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3619                 req.src_ipaddr_mask[0] =
3620                         rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3621         if (enables &
3622             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3623                 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
		req.dst_ipaddr_mask[0] =
			rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
3628         if (enables &
3629             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3630                 req.src_port = rte_cpu_to_le_16(filter->src_port);
3631         if (enables &
3632             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3633                 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3634         if (enables &
3635             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3636                 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3637         if (enables &
3638             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3639                 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3640         if (enables &
3641             HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3642                 req.mirror_vnic_id = filter->mirror_vnic_id;
3643
3644         req.enables = rte_cpu_to_le_32(enables);
3645
3646         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3647
3648         HWRM_CHECK_RESULT();
3649
3650         filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3651         HWRM_UNLOCK();
3652
3653         return rc;
3654 }
3655
3656 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3657                                 struct bnxt_filter_info *filter)
3658 {
3659         int rc = 0;
3660         struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3661         struct hwrm_cfa_ntuple_filter_free_output *resp =
3662                                                 bp->hwrm_cmd_resp_addr;
3663
3664         if (filter->fw_ntuple_filter_id == UINT64_MAX)
3665                 return 0;
3666
3667         HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
3668
3669         req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3670
3671         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3672
3673         HWRM_CHECK_RESULT();
3674         HWRM_UNLOCK();
3675
3676         filter->fw_ntuple_filter_id = -1;
3677
3678         return 0;
3679 }
3680
3681 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3682 {
3683         unsigned int rss_idx, fw_idx, i;
3684
3685         if (vnic->rss_table && vnic->hash_type) {
3686                 /*
3687                  * Fill the RSS hash & redirection table with
3688                  * ring group ids for all VNICs
3689                  */
3690                 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
3691                         rss_idx++, fw_idx++) {
3692                         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3693                                 fw_idx %= bp->rx_cp_nr_rings;
3694                                 if (vnic->fw_grp_ids[fw_idx] !=
3695                                     INVALID_HW_RING_ID)
3696                                         break;
3697                                 fw_idx++;
3698                         }
3699                         if (i == bp->rx_cp_nr_rings)
3700                                 return 0;
3701                         vnic->rss_table[rss_idx] =
3702                                 vnic->fw_grp_ids[fw_idx];
3703                 }
3704                 return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
3705         }
3706         return 0;
3707 }
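
/*
 * Illustration (hypothetical values): with rx_cp_nr_rings == 4 and ring
 * group 2 unallocated (fw_grp_ids[2] == INVALID_HW_RING_ID), the loop in
 * bnxt_vnic_rss_configure() cycles fw_idx over the valid groups only, so
 * the redirection table repeats the pattern:
 *
 *	rss_idx:  0  1  2  3  4  5 ...
 *	fw_idx :  0  1  3  0  1  3 ...
 */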