cxgbe: add device configuration and Rx support
[dpdk.git] / drivers / net / cxgbe / cxgbe_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2014-2015 Chelsio Communications.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Chelsio Communications nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43
44 #include <rte_byteorder.h>
45 #include <rte_common.h>
46 #include <rte_cycles.h>
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_tailq.h>
56 #include <rte_eal.h>
57 #include <rte_alarm.h>
58 #include <rte_ether.h>
59 #include <rte_ethdev.h>
60 #include <rte_atomic.h>
61 #include <rte_malloc.h>
62 #include <rte_random.h>
63 #include <rte_dev.h>
64
65 #include "cxgbe.h"
66
/*
 * Macros needed to support the PCI Device ID Table ...
 */

/* Opens the table definition: expanded by t4_pci_id_tbl.h into a static
 * array of rte_pci_id entries named cxgb4_pci_tbl.
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static struct rte_pci_id cxgb4_pci_tbl[] = {
/* PCI function selector consumed by t4_pci_id_tbl.h — presumably selects
 * physical function 4 device IDs; confirm against that header.
 */
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

#define PCI_VENDOR_ID_CHELSIO 0x1425

/* Emits one table entry for a given Chelsio device ID. */
#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }

/* Terminates the table with a zeroed sentinel and closes the array. */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ .vendor_id = 0, } \
	}

/*
 *... and the PCI ID Table itself ...
 */
#include "t4_pci_id_tbl.h"
87
88 static uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
89                                 uint16_t nb_pkts)
90 {
91         struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
92         unsigned int work_done;
93
94         CXGBE_DEBUG_RX(adapter, "%s: rxq->rspq.cntxt_id = %u; nb_pkts = %d\n",
95                        __func__, rxq->rspq.cntxt_id, nb_pkts);
96
97         if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
98                 dev_err(adapter, "error in cxgbe poll\n");
99
100         CXGBE_DEBUG_RX(adapter, "%s: work_done = %u\n", __func__, work_done);
101         return work_done;
102 }
103
/*
 * Report device capabilities (queue counts, MAC/VF limits, offload flags,
 * RSS table size) to the ethdev layer.
 */
static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
			       struct rte_eth_dev_info *device_info)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	/* Ethernet queue sets are split evenly across the adapter's ports. */
	int max_queues = adapter->sge.max_ethqsets / adapter->params.nports;

	device_info->min_rx_bufsize = 68; /* XXX: Smallest pkt size */
	/* NOTE(review): 1500 is an MTU, not a max frame length — this likely
	 * under-reports max_rx_pktlen by the L2 header/CRC size; confirm.
	 */
	device_info->max_rx_pktlen = 1500; /* XXX: For now we support mtu */
	device_info->max_rx_queues = max_queues;
	device_info->max_tx_queues = max_queues;
	device_info->max_mac_addrs = 1;
	/* XXX: For now we support one MAC/port */
	device_info->max_vfs = adapter->params.arch.vfcount;
	device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */

	device_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
				       DEV_RX_OFFLOAD_IPV4_CKSUM |
				       DEV_RX_OFFLOAD_UDP_CKSUM |
				       DEV_RX_OFFLOAD_TCP_CKSUM;

	device_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
				       DEV_TX_OFFLOAD_IPV4_CKSUM |
				       DEV_TX_OFFLOAD_UDP_CKSUM |
				       DEV_TX_OFFLOAD_TCP_CKSUM |
				       DEV_TX_OFFLOAD_TCP_TSO;

	device_info->reta_size = pi->rss_size;
}
133
134 static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
135                                     uint16_t tx_queue_id);
136 static void cxgbe_dev_rx_queue_release(void *q);
137
138 static int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
139 {
140         struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
141         struct adapter *adapter = pi->adapter;
142         int err;
143
144         CXGBE_FUNC_TRACE();
145
146         if (!(adapter->flags & FW_QUEUE_BOUND)) {
147                 err = setup_sge_fwevtq(adapter);
148                 if (err)
149                         return err;
150                 adapter->flags |= FW_QUEUE_BOUND;
151         }
152
153         err = cfg_queue_count(eth_dev);
154         if (err)
155                 return err;
156
157         return 0;
158 }
159
160 static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
161                                     uint16_t rx_queue_id)
162 {
163         struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
164         struct adapter *adap = pi->adapter;
165         struct sge_rspq *q;
166
167         dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
168                   __func__, pi->port_id, rx_queue_id);
169
170         q = eth_dev->data->rx_queues[rx_queue_id];
171         return t4_sge_eth_rxq_start(adap, q);
172 }
173
174 static int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
175                                    uint16_t rx_queue_id)
176 {
177         struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
178         struct adapter *adap = pi->adapter;
179         struct sge_rspq *q;
180
181         dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
182                   __func__, pi->port_id, rx_queue_id);
183
184         q = eth_dev->data->rx_queues[rx_queue_id];
185         return t4_sge_eth_rxq_stop(adap, q);
186 }
187
188 static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
189                                     uint16_t queue_idx, uint16_t nb_desc,
190                                     unsigned int socket_id,
191                                     const struct rte_eth_rxconf *rx_conf,
192                                     struct rte_mempool *mp)
193 {
194         struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
195         struct adapter *adapter = pi->adapter;
196         struct sge *s = &adapter->sge;
197         struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset + queue_idx];
198         int err = 0;
199         int msi_idx = 0;
200         unsigned int temp_nb_desc;
201
202         RTE_SET_USED(rx_conf);
203
204         dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
205                   __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
206                   socket_id, mp);
207
208         /*  Free up the existing queue  */
209         if (eth_dev->data->rx_queues[queue_idx]) {
210                 cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
211                 eth_dev->data->rx_queues[queue_idx] = NULL;
212         }
213
214         eth_dev->data->rx_queues[queue_idx] = (void *)rxq;
215
216         /* Sanity Checking
217          *
218          * nb_desc should be > 0 and <= CXGBE_MAX_RING_DESC_SIZE
219          */
220         temp_nb_desc = nb_desc;
221         if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
222                 dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
223                          __func__, CXGBE_MIN_RING_DESC_SIZE,
224                          CXGBE_DEFAULT_RX_DESC_SIZE);
225                 temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
226         } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
227                 dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
228                         __func__, CXGBE_MIN_RING_DESC_SIZE,
229                         CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE);
230                 return -(EINVAL);
231         }
232
233         rxq->rspq.size = temp_nb_desc;
234         if ((&rxq->fl) != NULL)
235                 rxq->fl.size = temp_nb_desc;
236
237         err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
238                                &rxq->fl, t4_ethrx_handler,
239                                t4_get_mps_bg_map(adapter, pi->tx_chan), mp,
240                                queue_idx, socket_id);
241
242         dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u\n",
243                   __func__, err, pi->port_id, rxq->rspq.cntxt_id);
244         return err;
245 }
246
247 static void cxgbe_dev_rx_queue_release(void *q)
248 {
249         struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;
250         struct sge_rspq *rq = &rxq->rspq;
251
252         if (rq) {
253                 struct port_info *pi = (struct port_info *)
254                                        (rq->eth_dev->data->dev_private);
255                 struct adapter *adap = pi->adapter;
256
257                 dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
258                           __func__, pi->port_id, rxq->rspq.cntxt_id);
259
260                 t4_sge_eth_rxq_release(adap, rxq);
261         }
262 }
263
/* ethdev callback table shared by all cxgbe ports. */
static struct eth_dev_ops cxgbe_eth_dev_ops = {
	.dev_configure		= cxgbe_dev_configure,
	.dev_infos_get		= cxgbe_dev_info_get,
	.rx_queue_setup		= cxgbe_dev_rx_queue_setup,
	.rx_queue_start		= cxgbe_dev_rx_queue_start,
	.rx_queue_stop		= cxgbe_dev_rx_queue_stop,
	.rx_queue_release	= cxgbe_dev_rx_queue_release,
};
272
273 /*
274  * Initialize driver
275  * It returns 0 on success.
276  */
277 static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
278 {
279         struct rte_pci_device *pci_dev;
280         struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
281         struct adapter *adapter = NULL;
282         char name[RTE_ETH_NAME_MAX_LEN];
283         int err = 0;
284
285         CXGBE_FUNC_TRACE();
286
287         eth_dev->dev_ops = &cxgbe_eth_dev_ops;
288         eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
289
290         /* for secondary processes, we don't initialise any further as primary
291          * has already done this work.
292          */
293         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
294                 return 0;
295
296         pci_dev = eth_dev->pci_dev;
297         snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
298         adapter = rte_zmalloc(name, sizeof(*adapter), 0);
299         if (!adapter)
300                 return -1;
301
302         adapter->use_unpacked_mode = 1;
303         adapter->regs = (void *)pci_dev->mem_resource[0].addr;
304         if (!adapter->regs) {
305                 dev_err(adapter, "%s: cannot map device registers\n", __func__);
306                 err = -ENOMEM;
307                 goto out_free_adapter;
308         }
309         adapter->pdev = pci_dev;
310         adapter->eth_dev = eth_dev;
311         pi->adapter = adapter;
312
313         err = cxgbe_probe(adapter);
314         if (err)
315                 dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
316                         __func__, err);
317
318 out_free_adapter:
319         return err;
320 }
321
/* PCI ethdev driver descriptor: ties the PCI ID table to the per-port
 * init routine; dev_private_size sizes the per-port private area.
 */
static struct eth_driver rte_cxgbe_pmd = {
	{
		.name = "rte_cxgbe_pmd",
		.id_table = cxgb4_pci_tbl,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	},
	.eth_dev_init = eth_cxgbe_dev_init,
	.dev_private_size = sizeof(struct port_info),
};
331
332 /*
333  * Driver initialization routine.
334  * Invoked once at EAL init time.
335  * Register itself as the [Poll Mode] Driver of PCI CXGBE devices.
336  */
337 static int rte_cxgbe_pmd_init(const char *name __rte_unused,
338                               const char *params __rte_unused)
339 {
340         CXGBE_FUNC_TRACE();
341
342         rte_eth_driver_register(&rte_cxgbe_pmd);
343         return 0;
344 }
345
/* EAL driver descriptor: registers rte_cxgbe_pmd_init as the init hook for
 * this PCI (PMD_PDEV) driver at startup.
 */
static struct rte_driver rte_cxgbe_driver = {
	.name = "cxgbe_driver",
	.type = PMD_PDEV,
	.init = rte_cxgbe_pmd_init,
};

PMD_REGISTER_DRIVER(rte_cxgbe_driver);