enic: disable debug traces
[dpdk.git] / lib / librte_pmd_enic / enic_ethdev.c
1 /*
2  * Copyright 2008-2014 Cisco Systems, Inc.  All rights reserved.
3  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
4  *
5  * Copyright (c) 2014, Cisco Systems, Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  * notice, this list of conditions and the following disclaimer.
14  *
15  * 2. Redistributions in binary form must reproduce the above copyright
16  * notice, this list of conditions and the following disclaimer in
17  * the documentation and/or other materials provided with the
18  * distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  */
34 #ident "$Id$"
35
#include <errno.h>
#include <stdio.h>
#include <stdint.h>

#include <rte_dev.h>
#include <rte_pci.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
43
44 #include "vnic_intr.h"
45 #include "vnic_cq.h"
46 #include "vnic_wq.h"
47 #include "vnic_rq.h"
48 #include "vnic_enet.h"
49 #include "enic.h"
50
/*
 * Function-entry trace helper: logs the caller's name at DEBUG level
 * when the PMD is built with RTE_LIBRTE_ENIC_DEBUG, and compiles to a
 * no-op otherwise.
 */
#ifdef RTE_LIBRTE_ENIC_DEBUG
#define ENICPMD_FUNC_TRACE() \
        RTE_LOG(DEBUG, PMD, "ENICPMD trace: %s\n", __func__)
#else
#define ENICPMD_FUNC_TRACE() (void)0
#endif
57
58 /*
59  * The set of PCI devices this driver supports
60  */
static struct rte_pci_id pci_id_enic_map[] = {
/* Expand each DECL below into an rte_pci_id table entry. */
#define RTE_PCI_DEV_ID_DECL_ENIC(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#ifndef PCI_VENDOR_ID_CISCO
#define PCI_VENDOR_ID_CISCO     0x1137
#endif
/* Pull in any ENIC device IDs declared in the shared PCI ID list. */
#include "rte_pci_dev_ids.h"
RTE_PCI_DEV_ID_DECL_ENIC(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET)
RTE_PCI_DEV_ID_DECL_ENIC(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF)
{.vendor_id = 0, /* Sentinel: marks end of table */},
};
71
72 static int enicpmd_fdir_remove_perfect_filter(struct rte_eth_dev *eth_dev,
73                 struct rte_fdir_filter *fdir_filter,
74                 __rte_unused uint16_t soft_id)
75 {
76         struct enic *enic = pmd_priv(eth_dev);
77
78         ENICPMD_FUNC_TRACE();
79         return enic_fdir_del_fltr(enic, fdir_filter);
80 }
81
82 static int enicpmd_fdir_add_perfect_filter(struct rte_eth_dev *eth_dev,
83         struct rte_fdir_filter *fdir_filter, __rte_unused uint16_t soft_id,
84         uint8_t queue, uint8_t drop)
85 {
86         struct enic *enic = pmd_priv(eth_dev);
87
88         ENICPMD_FUNC_TRACE();
89         return enic_fdir_add_fltr(enic, fdir_filter, (uint16_t)queue, drop);
90 }
91
92 static void enicpmd_fdir_info_get(struct rte_eth_dev *eth_dev,
93         struct rte_eth_fdir *fdir)
94 {
95         struct enic *enic = pmd_priv(eth_dev);
96
97         ENICPMD_FUNC_TRACE();
98         *fdir = enic->fdir.stats;
99 }
100
/* Release a TX queue; txq is the vnic_wq pointer stored at setup time. */
static void enicpmd_dev_tx_queue_release(void *txq)
{
        ENICPMD_FUNC_TRACE();

        enic_free_wq(txq);
}
106
107 static int enicpmd_dev_setup_intr(struct enic *enic)
108 {
109         int ret;
110         unsigned int index;
111
112         ENICPMD_FUNC_TRACE();
113
114         /* Are we done with the init of all the queues? */
115         for (index = 0; index < enic->cq_count; index++) {
116                 if (!enic->cq[index].ctrl)
117                         break;
118         }
119
120         if (enic->cq_count != index)
121                 return 0;
122
123         ret = enic_alloc_intr_resources(enic);
124         if (ret) {
125                 dev_err(enic, "alloc intr failed\n");
126                 return ret;
127         }
128         enic_init_vnic_resources(enic);
129
130         ret = enic_setup_finish(enic);
131         if (ret)
132                 dev_err(enic, "setup could not be finished\n");
133
134         return ret;
135 }
136
137 static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
138         uint16_t queue_idx,
139         uint16_t nb_desc,
140         unsigned int socket_id,
141         __rte_unused const struct rte_eth_txconf *tx_conf)
142 {
143         int ret;
144         struct enic *enic = pmd_priv(eth_dev);
145
146         ENICPMD_FUNC_TRACE();
147         eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];
148
149         ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
150         if (ret) {
151                 dev_err(enic, "error in allocating wq\n");
152                 return ret;
153         }
154
155         return enicpmd_dev_setup_intr(enic);
156 }
157
158 static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
159         uint16_t queue_idx)
160 {
161         struct enic *enic = pmd_priv(eth_dev);
162
163         ENICPMD_FUNC_TRACE();
164
165         enic_start_wq(enic, queue_idx);
166
167         return 0;
168 }
169
170 static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
171         uint16_t queue_idx)
172 {
173         int ret;
174         struct enic *enic = pmd_priv(eth_dev);
175
176         ENICPMD_FUNC_TRACE();
177
178         ret = enic_stop_wq(enic, queue_idx);
179         if (ret)
180                 dev_err(enic, "error in stopping wq %d\n", queue_idx);
181
182         return ret;
183 }
184
185 static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
186         uint16_t queue_idx)
187 {
188         struct enic *enic = pmd_priv(eth_dev);
189
190         ENICPMD_FUNC_TRACE();
191
192         enic_start_rq(enic, queue_idx);
193
194         return 0;
195 }
196
197 static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
198         uint16_t queue_idx)
199 {
200         int ret;
201         struct enic *enic = pmd_priv(eth_dev);
202
203         ENICPMD_FUNC_TRACE();
204
205         ret = enic_stop_rq(enic, queue_idx);
206         if (ret)
207                 dev_err(enic, "error in stopping rq %d\n", queue_idx);
208
209         return ret;
210 }
211
/* Release an RX queue; rxq is the vnic_rq pointer stored at setup time. */
static void enicpmd_dev_rx_queue_release(void *rxq)
{
        ENICPMD_FUNC_TRACE();

        enic_free_rq(rxq);
}
217
218 static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
219         uint16_t queue_idx,
220         uint16_t nb_desc,
221         unsigned int socket_id,
222         __rte_unused const struct rte_eth_rxconf *rx_conf,
223         struct rte_mempool *mp)
224 {
225         int ret;
226         struct enic *enic = pmd_priv(eth_dev);
227
228         ENICPMD_FUNC_TRACE();
229         eth_dev->data->rx_queues[queue_idx] = (void *)&enic->rq[queue_idx];
230
231         ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc);
232         if (ret) {
233                 dev_err(enic, "error in allocating rq\n");
234                 return ret;
235         }
236
237         return enicpmd_dev_setup_intr(enic);
238 }
239
240 static int enicpmd_vlan_filter_set(struct rte_eth_dev *eth_dev,
241         uint16_t vlan_id, int on)
242 {
243         struct enic *enic = pmd_priv(eth_dev);
244
245         ENICPMD_FUNC_TRACE();
246         if (on)
247                 enic_add_vlan(enic, vlan_id);
248         else
249                 enic_del_vlan(enic, vlan_id);
250         return 0;
251 }
252
253 static void enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
254 {
255         struct enic *enic = pmd_priv(eth_dev);
256
257         ENICPMD_FUNC_TRACE();
258
259         if (mask & ETH_VLAN_STRIP_MASK) {
260                 if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
261                         enic->ig_vlan_strip_en = 1;
262                 else
263                         enic->ig_vlan_strip_en = 0;
264         }
265         enic_set_rss_nic_cfg(enic);
266
267
268         if (mask & ETH_VLAN_FILTER_MASK) {
269                 dev_warning(enic,
270                         "Configuration of VLAN filter is not supported\n");
271         }
272
273         if (mask & ETH_VLAN_EXTEND_MASK) {
274                 dev_warning(enic,
275                         "Configuration of extended VLAN is not supported\n");
276         }
277 }
278
279 static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
280 {
281         int ret;
282         struct enic *enic = pmd_priv(eth_dev);
283
284         ENICPMD_FUNC_TRACE();
285         ret = enic_set_vnic_res(enic);
286         if (ret) {
287                 dev_err(enic, "Set vNIC resource num  failed, aborting\n");
288                 return ret;
289         }
290
291         if (eth_dev->data->dev_conf.rxmode.split_hdr_size &&
292                 eth_dev->data->dev_conf.rxmode.header_split) {
293                 /* Enable header-data-split */
294                 enic_set_hdr_split_size(enic,
295                         eth_dev->data->dev_conf.rxmode.split_hdr_size);
296         }
297
298         enic->hw_ip_checksum = eth_dev->data->dev_conf.rxmode.hw_ip_checksum;
299         return 0;
300 }
301
302 /* Start the device.
303  * It returns 0 on success.
304  */
/* Start the device: enable rx/tx paths.  Returns 0 on success. */
static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
{
        ENICPMD_FUNC_TRACE();
        return enic_enable(pmd_priv(eth_dev));
}
312
313 /*
314  * Stop device: disable rx and tx functions to allow for reconfiguring.
315  */
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring,
 * then atomically publish an all-zero ("link down") link status.
 */
static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
{
        struct rte_eth_link link;
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic_disable(enic);
        /* The cmpset treats the whole rte_eth_link struct as one 64-bit
         * word.  NOTE(review): this assumes sizeof(struct rte_eth_link)
         * == 8 and 8-byte alignment -- confirm against rte_ethdev.h. */
        memset(&link, 0, sizeof(link));
        rte_atomic64_cmpset((uint64_t *)&eth_dev->data->dev_link,
                *(uint64_t *)&eth_dev->data->dev_link,
                *(uint64_t *)&link);
}
328
329 /*
330  * Stop device.
331  */
/* Close the device: tear down all vNIC resources for this port. */
static void enicpmd_dev_close(struct rte_eth_dev *eth_dev)
{
        ENICPMD_FUNC_TRACE();
        enic_remove(pmd_priv(eth_dev));
}
339
340 static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
341         __rte_unused int wait_to_complete)
342 {
343         struct enic *enic = pmd_priv(eth_dev);
344         int ret;
345         int link_status = 0;
346
347         ENICPMD_FUNC_TRACE();
348         link_status = enic_get_link_status(enic);
349         ret = (link_status == enic->link_status);
350         enic->link_status = link_status;
351         eth_dev->data->dev_link.link_status = link_status;
352         eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
353         eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
354         return ret;
355 }
356
/* Fill in the generic ethdev stats from the vNIC counters. */
static void enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
        struct rte_eth_stats *stats)
{
        ENICPMD_FUNC_TRACE();
        enic_dev_stats_get(pmd_priv(eth_dev), stats);
}
365
/* Clear the device statistics counters. */
static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
        ENICPMD_FUNC_TRACE();
        enic_dev_stats_clear(pmd_priv(eth_dev));
}
373
374 static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
375         struct rte_eth_dev_info *device_info)
376 {
377         struct enic *enic = pmd_priv(eth_dev);
378
379         ENICPMD_FUNC_TRACE();
380         device_info->max_rx_queues = enic->rq_count;
381         device_info->max_tx_queues = enic->wq_count;
382         device_info->min_rx_bufsize = ENIC_MIN_MTU;
383         device_info->max_rx_pktlen = enic->config.mtu;
384         device_info->max_mac_addrs = 1;
385         device_info->rx_offload_capa =
386                 DEV_RX_OFFLOAD_VLAN_STRIP |
387                 DEV_RX_OFFLOAD_IPV4_CKSUM |
388                 DEV_RX_OFFLOAD_UDP_CKSUM  |
389                 DEV_RX_OFFLOAD_TCP_CKSUM;
390         device_info->tx_offload_capa =
391                 DEV_TX_OFFLOAD_VLAN_INSERT |
392                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
393                 DEV_TX_OFFLOAD_UDP_CKSUM   |
394                 DEV_TX_OFFLOAD_TCP_CKSUM;
395 }
396
397 static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
398 {
399         struct enic *enic = pmd_priv(eth_dev);
400
401         ENICPMD_FUNC_TRACE();
402         enic->promisc = 1;
403         enic_add_packet_filter(enic);
404 }
405
406 static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
407 {
408         struct enic *enic = pmd_priv(eth_dev);
409
410         ENICPMD_FUNC_TRACE();
411         enic->promisc = 0;
412         enic_add_packet_filter(enic);
413 }
414
415 static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
416 {
417         struct enic *enic = pmd_priv(eth_dev);
418
419         ENICPMD_FUNC_TRACE();
420         enic->allmulti = 1;
421         enic_add_packet_filter(enic);
422 }
423
424 static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
425 {
426         struct enic *enic = pmd_priv(eth_dev);
427
428         ENICPMD_FUNC_TRACE();
429         enic->allmulti = 0;
430         enic_add_packet_filter(enic);
431 }
432
433 static void enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
434         struct ether_addr *mac_addr,
435         __rte_unused uint32_t index, __rte_unused uint32_t pool)
436 {
437         struct enic *enic = pmd_priv(eth_dev);
438
439         ENICPMD_FUNC_TRACE();
440         enic_set_mac_address(enic, mac_addr->addr_bytes);
441 }
442
443 static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, __rte_unused uint32_t index)
444 {
445         struct enic *enic = pmd_priv(eth_dev);
446
447         ENICPMD_FUNC_TRACE();
448         enic_del_mac_address(enic);
449 }
450
451
/*
 * Burst transmit handler.  Walks up to nb_pkts mbuf chains, posting
 * one work-queue descriptor per segment via enic_send_pkt(), and
 * returns the number of packet chains it got through before running
 * out of descriptors (nb_pkts when everything fit).
 */
static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint16_t nb_pkts)
{
        unsigned int index;
        unsigned int frags;
        unsigned int pkt_len;
        unsigned int seg_len;
        unsigned int inc_len;
        unsigned int nb_segs;
        struct rte_mbuf *tx_pkt;
        struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
        struct enic *enic = vnic_dev_priv(wq->vdev);
        unsigned short vlan_id;
        /* NOTE(review): ol_flags is narrowed to unsigned short when
         * copied below -- confirm mbuf ol_flags fits in 16 bits in
         * this DPDK version. */
        unsigned short ol_flags;

        for (index = 0; index < nb_pkts; index++) {
                tx_pkt = *tx_pkts++;
                inc_len = 0;
                nb_segs = tx_pkt->nb_segs;
                /* Not enough descriptors for this chain: reclaim
                 * completed ones and re-check before giving up. */
                if (nb_segs > vnic_wq_desc_avail(wq)) {
                        /* wq cleanup and try again */
                        if (!enic_cleanup_wq(enic, wq) ||
                                (nb_segs > vnic_wq_desc_avail(wq)))
                                return index;
                }
                pkt_len = tx_pkt->pkt_len;
                vlan_id = tx_pkt->vlan_tci;
                ol_flags = tx_pkt->ol_flags;
                /* Post one descriptor per segment.  !frags marks the
                 * start of packet; (pkt_len == inc_len) marks the end. */
                for (frags = 0; inc_len < pkt_len; frags++) {
                        if (!tx_pkt)
                                break;
                        seg_len = tx_pkt->data_len;
                        inc_len += seg_len;
                        if (enic_send_pkt(enic, wq, tx_pkt,
                                    (unsigned short)seg_len, !frags,
                                    (pkt_len == inc_len), ol_flags, vlan_id)) {
                                break;
                        }
                        tx_pkt = tx_pkt->next;
                }
        }

        /* Opportunistically reclaim completed descriptors on the way out. */
        enic_cleanup_wq(enic, wq);
        return index;
}
497
/*
 * Burst receive handler: drain up to nb_pkts packets from the receive
 * queue into rx_pkts and return how many were actually received.
 */
static uint16_t enicpmd_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        uint16_t nb_pkts)
{
        struct vnic_rq *rq = (struct vnic_rq *)rx_queue;
        unsigned int work_done;

        /* NOTE(review): no local `enic` is declared in this function;
         * this presumably only compiles because dev_err() ignores its
         * first argument -- confirm against enic_compat.h and consider
         * deriving enic from rq->vdev. */
        if (enic_poll(rq, rx_pkts, (unsigned int)nb_pkts, &work_done))
                dev_err(enic, "error in enicpmd poll\n");

        return work_done;
}
509
510 static struct eth_dev_ops enicpmd_eth_dev_ops = {
511         .dev_configure        = enicpmd_dev_configure,
512         .dev_start            = enicpmd_dev_start,
513         .dev_stop             = enicpmd_dev_stop,
514         .dev_set_link_up      = NULL,
515         .dev_set_link_down    = NULL,
516         .dev_close            = enicpmd_dev_close,
517         .promiscuous_enable   = enicpmd_dev_promiscuous_enable,
518         .promiscuous_disable  = enicpmd_dev_promiscuous_disable,
519         .allmulticast_enable  = enicpmd_dev_allmulticast_enable,
520         .allmulticast_disable = enicpmd_dev_allmulticast_disable,
521         .link_update          = enicpmd_dev_link_update,
522         .stats_get            = enicpmd_dev_stats_get,
523         .stats_reset          = enicpmd_dev_stats_reset,
524         .queue_stats_mapping_set = NULL,
525         .dev_infos_get        = enicpmd_dev_info_get,
526         .mtu_set              = NULL,
527         .vlan_filter_set      = enicpmd_vlan_filter_set,
528         .vlan_tpid_set        = NULL,
529         .vlan_offload_set     = enicpmd_vlan_offload_set,
530         .vlan_strip_queue_set = NULL,
531         .rx_queue_start       = enicpmd_dev_rx_queue_start,
532         .rx_queue_stop        = enicpmd_dev_rx_queue_stop,
533         .tx_queue_start       = enicpmd_dev_tx_queue_start,
534         .tx_queue_stop        = enicpmd_dev_tx_queue_stop,
535         .rx_queue_setup       = enicpmd_dev_rx_queue_setup,
536         .rx_queue_release     = enicpmd_dev_rx_queue_release,
537         .rx_queue_count       = NULL,
538         .rx_descriptor_done   = NULL,
539         .tx_queue_setup       = enicpmd_dev_tx_queue_setup,
540         .tx_queue_release     = enicpmd_dev_tx_queue_release,
541         .dev_led_on           = NULL,
542         .dev_led_off          = NULL,
543         .flow_ctrl_get        = NULL,
544         .flow_ctrl_set        = NULL,
545         .priority_flow_ctrl_set = NULL,
546         .mac_addr_add         = enicpmd_add_mac_addr,
547         .mac_addr_remove      = enicpmd_remove_mac_addr,
548         .fdir_add_signature_filter    = NULL,
549         .fdir_update_signature_filter = NULL,
550         .fdir_remove_signature_filter = NULL,
551         .fdir_infos_get               = enicpmd_fdir_info_get,
552         .fdir_add_perfect_filter      = enicpmd_fdir_add_perfect_filter,
553         .fdir_update_perfect_filter   = enicpmd_fdir_add_perfect_filter,
554         .fdir_remove_perfect_filter   = enicpmd_fdir_remove_perfect_filter,
555         .fdir_set_masks               = NULL,
556 };
557
/* Head of a driver-wide list of enic instances.  NOTE(review): nothing
 * in this file appends to it -- presumably maintained elsewhere; verify. */
struct enic *enicpmd_list_head = NULL;
559 /* Initialize the driver
560  * It returns 0 on success.
561  */
562 static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
563 {
564         struct rte_pci_device *pdev;
565         struct rte_pci_addr *addr;
566         struct enic *enic = pmd_priv(eth_dev);
567
568         ENICPMD_FUNC_TRACE();
569
570         enic->rte_dev = eth_dev;
571         eth_dev->dev_ops = &enicpmd_eth_dev_ops;
572         eth_dev->rx_pkt_burst = &enicpmd_recv_pkts;
573         eth_dev->tx_pkt_burst = &enicpmd_xmit_pkts;
574
575         pdev = eth_dev->pci_dev;
576         enic->pdev = pdev;
577         addr = &pdev->addr;
578
579         snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
580                 addr->domain, addr->bus, addr->devid, addr->function);
581
582         return enic_probe(enic);
583 }
584
/* Ethdev PCI driver descriptor: matches the ID table above, requires
 * BAR mapping, and allocates one struct enic of private data per port. */
static struct eth_driver rte_enic_pmd = {
        {
                .name = "rte_enic_pmd",
                .id_table = pci_id_enic_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        },
        .eth_dev_init = eth_enicpmd_dev_init,
        .dev_private_size = sizeof(struct enic),
};
594
595 /* Driver initialization routine.
596  * Invoked once at EAL init time.
597  * Register as the [Poll Mode] Driver of Cisco ENIC device.
598  */
599 static int
600 rte_enic_pmd_init(const char *name __rte_unused,
601         const char *params __rte_unused)
602 {
603         ENICPMD_FUNC_TRACE();
604
605         rte_eth_driver_register(&rte_enic_pmd);
606         return 0;
607 }
608
/* EAL driver object for this physical-device PMD; registration below
 * hooks rte_enic_pmd_init into EAL initialization. */
static struct rte_driver rte_enic_driver = {
        .type = PMD_PDEV,
        .init = rte_enic_pmd_init,
};

PMD_REGISTER_DRIVER(rte_enic_driver);