net/cxgbe: enable RSS for VF
drivers/net/cxgbe/cxgbevf_main.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */

#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>

#include "common.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgbe.h"

/*
 * Figure out how many Ports and Queue Sets we can support.  This depends on
 * knowing our Virtual Function Resources and may be called a second time if
 * we fall back from MSI-X to MSI Interrupt Mode.
 */
static void size_nports_qsets(struct adapter *adapter)
{
        struct vf_resources *vfres = &adapter->params.vfres;
        unsigned int ethqsets, pmask_nports;

        /*
         * The number of "ports" which we support is equal to the number of
         * Virtual Interfaces with which we've been provisioned.
         */
        adapter->params.nports = vfres->nvi;
        if (adapter->params.nports > MAX_NPORTS) {
                dev_warn(adapter->pdev_dev, "only using %d of %d maximum"
                         " allowed virtual interfaces\n", MAX_NPORTS,
                         adapter->params.nports);
                adapter->params.nports = MAX_NPORTS;
        }

        /*
         * We may have been provisioned with more VIs than the number of
         * ports we're allowed to access (our Port Access Rights Mask).
         * This is obviously a configuration conflict but we don't want to
         * do anything silly just because of that.
         */
        pmask_nports = hweight32(adapter->params.vfres.pmask);
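        /*
         * For example (hypothetical provisioning, for illustration only):
         * a pmask of 0x3 grants access rights to ports 0 and 1 only, so
         * hweight32() yields 2.
         */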
        if (pmask_nports < adapter->params.nports) {
                dev_warn(adapter->pdev_dev, "only using %d of %d provisioned"
                         " virtual interfaces; limited by Port Access Rights"
                         " mask %#x\n", pmask_nports, adapter->params.nports,
                         adapter->params.vfres.pmask);
                adapter->params.nports = pmask_nports;
        }

        /*
         * We need to reserve an Ingress Queue for the Asynchronous Firmware
         * Event Queue.
         *
         * For each Queue Set, we'll need the ability to allocate two Egress
         * Contexts -- one for the Ingress Queue Free List and one for the TX
         * Ethernet Queue.
         */
        ethqsets = vfres->niqflint - 1;
        if (vfres->nethctrl != ethqsets)
                ethqsets = min(vfres->nethctrl, ethqsets);
        if (vfres->neq < ethqsets * 2)
                ethqsets = vfres->neq / 2;
        if (ethqsets > MAX_ETH_QSETS)
                ethqsets = MAX_ETH_QSETS;
        adapter->sge.max_ethqsets = ethqsets;
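        /*
         * Worked example with hypothetical VF provisioning (illustrative
         * numbers only): niqflint = 9, nethctrl = 8, neq = 10.  Reserving
         * one Ingress Queue for the firmware Event Queue leaves
         * ethqsets = 8, which already matches nethctrl, but only
         * neq / 2 = 5 Queue Sets can get both of their Egress Contexts,
         * so max_ethqsets ends up as 5.
         */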

        if (adapter->sge.max_ethqsets < adapter->params.nports) {
                dev_warn(adapter->pdev_dev, "only using %d of %d available"
                         " virtual interfaces (too few Queue Sets)\n",
                         adapter->sge.max_ethqsets, adapter->params.nports);
                adapter->params.nports = adapter->sge.max_ethqsets;
        }
}

static int adap_init0vf(struct adapter *adapter)
{
        u32 param, val = 0;
        int err;

        err = t4vf_fw_reset(adapter);
        if (err < 0) {
                dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
                return err;
        }

        /*
         * Grab basic operational parameters.  These will predominantly have
         * been set up by the Physical Function Driver or will be hard coded
         * into the adapter.  We just have to live with them ...  Note that
         * we _must_ get our VPD parameters before our SGE parameters because
         * we need to know the adapter's core clock from the VPD in order to
         * properly decode the SGE Timer Values.
         */
        err = t4vf_get_dev_params(adapter);
        if (err) {
                dev_err(adapter->pdev_dev, "unable to retrieve adapter"
                        " device parameters: err=%d\n", err);
                return err;
        }

        err = t4vf_get_vpd_params(adapter);
        if (err) {
                dev_err(adapter->pdev_dev, "unable to retrieve adapter"
                        " VPD parameters: err=%d\n", err);
                return err;
        }

        adapter->pf = t4vf_get_pf_from_vf(adapter);
        err = t4vf_sge_init(adapter);
        if (err) {
                dev_err(adapter->pdev_dev, "error in sge init\n");
                return err;
        }

        err = t4vf_get_rss_glb_config(adapter);
        if (err) {
                dev_err(adapter->pdev_dev, "unable to retrieve adapter"
                        " RSS parameters: err=%d\n", err);
                return err;
        }
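        /*
         * The VF driver can only operate in the "Basic Virtual" global RSS
         * mode, in which RSS is handled per Virtual Interface; refuse to
         * continue under any other mode.
         */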
        if (adapter->params.rss.mode !=
            FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
                dev_err(adapter->pdev_dev, "unable to operate with global RSS"
                        " mode %d\n", adapter->params.rss.mode);
                return -EINVAL;
        }

        /* If we're running on newer firmware, let it know that we're
         * prepared to deal with encapsulated CPL messages.  Older
         * firmware won't understand this and we'll just get
         * unencapsulated messages ...
         */
        param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
                V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
        val = 1;
        t4vf_set_params(adapter, 1, &param, &val);
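        /*
         * The return value is deliberately ignored: if the firmware is too
         * old to know about this parameter we simply fall back to
         * unencapsulated messages, as noted above.
         */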

        /*
         * Grab our Virtual Interface resource allocation, extract the
         * features that we're interested in and do a bit of sanity testing on
         * what we discover.
         */
        err = t4vf_get_vfres(adapter);
        if (err) {
                dev_err(adapter->pdev_dev, "unable to get virtual interface"
                        " resources: err=%d\n", err);
                return err;
        }

        /*
         * Check for various parameter sanity issues.
         */
        if (adapter->params.vfres.pmask == 0) {
                dev_err(adapter->pdev_dev, "no port access configured/"
                        "usable!\n");
                return -EINVAL;
        }
        if (adapter->params.vfres.nvi == 0) {
                dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
                        "usable!\n");
                return -EINVAL;
        }

        /*
         * Initialize nports and max_ethqsets now that we have our Virtual
         * Function Resources.
         */
        size_nports_qsets(adapter);
        adapter->flags |= FW_OK;
        return 0;
}

int cxgbevf_probe(struct adapter *adapter)
{
        struct port_info *pi;
        unsigned int pmask;
        int err = 0;
        int i;

        t4_os_lock_init(&adapter->mbox_lock);
        TAILQ_INIT(&adapter->mbox_list);
        err = t4vf_prep_adapter(adapter);
        if (err)
                return err;

        if (!is_t4(adapter->params.chip)) {
                adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr;
                if (!adapter->bar2) {
                        dev_err(adapter, "cannot map device bar2 region\n");
                        err = -ENOMEM;
                        return err;
                }
        }

        err = adap_init0vf(adapter);
        if (err) {
                dev_err(adapter, "%s: Adapter initialization failed, error %d\n",
                                __func__, err);
                goto out_free;
        }

        pmask = adapter->params.vfres.pmask;
        for_each_port(adapter, i) {
                const unsigned int numa_node = rte_socket_id();
                char name[RTE_ETH_NAME_MAX_LEN];
                struct rte_eth_dev *eth_dev;
                int port_id;

                if (pmask == 0)
                        break;
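                /*
                 * Claim the lowest-numbered port we still have access
                 * rights to and clear its bit for the next iteration.
                 */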
                port_id = ffs(pmask) - 1;
                pmask &= ~(1 << port_id);

                snprintf(name, sizeof(name), "%s_%d",
                         adapter->pdev->device.name, i);

                if (i == 0) {
                        /* First port is already allocated by DPDK */
                        eth_dev = adapter->eth_dev;
                        goto allocate_mac;
                }

                /*
                 * Now do all data allocation - for the eth_dev structure
                 * and internal (private) data for the remaining ports.
                 */

                /* reserve an ethdev entry */
                eth_dev = rte_eth_dev_allocate(name);
                if (!eth_dev) {
                        err = -ENOMEM;
                        goto out_free;
                }
                eth_dev->data->dev_private =
                        rte_zmalloc_socket(name, sizeof(struct port_info),
                                           RTE_CACHE_LINE_SIZE, numa_node);
                if (!eth_dev->data->dev_private) {
                        err = -ENOMEM;
                        goto out_free;
                }

allocate_mac:
                pi = (struct port_info *)eth_dev->data->dev_private;
                adapter->port[i] = pi;
                pi->eth_dev = eth_dev;
                pi->adapter = adapter;
                pi->xact_addr_filt = -1;
                pi->port_id = port_id;
                pi->pidx = i;

                pi->eth_dev->device = &adapter->pdev->device;
                pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
                pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
                pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;

                rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);
                pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
                                                           ETHER_ADDR_LEN, 0);
                if (!pi->eth_dev->data->mac_addrs) {
                        dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n",
                                __func__);
                        err = -ENOMEM;
                        goto out_free;
                }
        }

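        /*
         * Now that the ports are allocated, complete per-port
         * initialization from firmware (e.g. fetch each Virtual
         * Interface's MAC address) before configuring queues.
         */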
        if (adapter->flags & FW_OK) {
                err = t4vf_port_init(adapter);
                if (err) {
                        dev_err(adapter, "%s: t4vf_port_init failed with err %d\n",
                                __func__, err);
                        goto out_free;
                }
        }

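        /*
         * Configure the default queue layout, dump adapter and port
         * information, and initialize the per-port RSS state.
         */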
        cfg_queues(adapter->eth_dev);
        print_adapter_info(adapter);
        print_port_info(adapter);

        err = init_rss(adapter);
        if (err)
                goto out_free;
        return 0;

out_free:
        for_each_port(adapter, i) {
                pi = adap2pinfo(adapter, i);
                if (pi->viid != 0)
                        t4_free_vi(adapter, adapter->mbox, adapter->pf,
                                   0, pi->viid);
                /* Skip first port since it'll be de-allocated by DPDK */
                if (i == 0)
                        continue;
                if (pi->eth_dev) {
                        if (pi->eth_dev->data->dev_private)
                                rte_free(pi->eth_dev->data->dev_private);
                        rte_eth_dev_release_port(pi->eth_dev);
                }
        }
        return -err;
}