net/enic: fix MAC address add and remove
[dpdk.git] drivers/net/enic/enic_main.c
1 /*
2  * Copyright 2008-2014 Cisco Systems, Inc.  All rights reserved.
3  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
4  *
5  * Copyright (c) 2014, Cisco Systems, Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  * notice, this list of conditions and the following disclaimer.
14  *
15  * 2. Redistributions in binary form must reproduce the above copyright
16  * notice, this list of conditions and the following disclaimer in
17  * the documentation and/or other materials provided with the
18  * distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  */
34
35 #include <stdio.h>
36
37 #include <sys/stat.h>
38 #include <sys/mman.h>
39 #include <fcntl.h>
40 #include <libgen.h>
41
42 #include <rte_pci.h>
43 #include <rte_memzone.h>
44 #include <rte_malloc.h>
45 #include <rte_mbuf.h>
46 #include <rte_string_fns.h>
47 #include <rte_ethdev.h>
48
49 #include "enic_compat.h"
50 #include "enic.h"
51 #include "wq_enet_desc.h"
52 #include "rq_enet_desc.h"
53 #include "cq_enet_desc.h"
54 #include "vnic_enet.h"
55 #include "vnic_dev.h"
56 #include "vnic_wq.h"
57 #include "vnic_rq.h"
58 #include "vnic_cq.h"
59 #include "vnic_intr.h"
60 #include "vnic_nic.h"
61
62 static inline int enic_is_sriov_vf(struct enic *enic)
63 {
64         return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
65 }
66
67 static int is_zero_addr(uint8_t *addr)
68 {
69         return !(addr[0] |  addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
70 }
71
72 static int is_mcast_addr(uint8_t *addr)
73 {
74         return addr[0] & 1;
75 }
76
77 static int is_eth_addr_valid(uint8_t *addr)
78 {
79         return !is_mcast_addr(addr) && !is_zero_addr(addr);
80 }
81
82 static void
83 enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
84 {
85         uint16_t i;
86
87         if (!rq || !rq->mbuf_ring) {
88                 dev_debug(enic, "Pointer to rq or mbuf_ring is NULL");
89                 return;
90         }
91
92         for (i = 0; i < rq->ring.desc_count; i++) {
93                 if (rq->mbuf_ring[i]) {
94                         rte_pktmbuf_free_seg(rq->mbuf_ring[i]);
95                         rq->mbuf_ring[i] = NULL;
96                 }
97         }
98 }
99
100 void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size)
101 {
102         vnic_set_hdr_split_size(enic->vdev, split_hdr_size);
103 }
104
105 static void enic_free_wq_buf(struct vnic_wq_buf *buf)
106 {
107         struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;
108
109         rte_pktmbuf_free_seg(mbuf);
110         buf->mb = NULL;
111 }
112
113 static void enic_log_q_error(struct enic *enic)
114 {
115         unsigned int i;
116         u32 error_status;
117
118         for (i = 0; i < enic->wq_count; i++) {
119                 error_status = vnic_wq_error_status(&enic->wq[i]);
120                 if (error_status)
121                         dev_err(enic, "WQ[%d] error_status %d\n", i,
122                                 error_status);
123         }
124
125         for (i = 0; i < enic_vnic_rq_count(enic); i++) {
126                 if (!enic->rq[i].in_use)
127                         continue;
128                 error_status = vnic_rq_error_status(&enic->rq[i]);
129                 if (error_status)
130                         dev_err(enic, "RQ[%d] error_status %d\n", i,
131                                 error_status);
132         }
133 }
134
135 static void enic_clear_soft_stats(struct enic *enic)
136 {
137         struct enic_soft_stats *soft_stats = &enic->soft_stats;
138         rte_atomic64_clear(&soft_stats->rx_nombuf);
139         rte_atomic64_clear(&soft_stats->rx_packet_errors);
140 }
141
142 static void enic_init_soft_stats(struct enic *enic)
143 {
144         struct enic_soft_stats *soft_stats = &enic->soft_stats;
145         rte_atomic64_init(&soft_stats->rx_nombuf);
146         rte_atomic64_init(&soft_stats->rx_packet_errors);
147         enic_clear_soft_stats(enic);
148 }
149
150 void enic_dev_stats_clear(struct enic *enic)
151 {
152         if (vnic_dev_stats_clear(enic->vdev))
153                 dev_err(enic, "Error in clearing stats\n");
154         enic_clear_soft_stats(enic);
155 }
156
157 void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
158 {
159         struct vnic_stats *stats;
160         struct enic_soft_stats *soft_stats = &enic->soft_stats;
161         int64_t rx_truncated;
162         uint64_t rx_packet_errors;
163
164         if (vnic_dev_stats_dump(enic->vdev, &stats)) {
165                 dev_err(enic, "Error in getting stats\n");
166                 return;
167         }
168
169         /* The number of truncated packets can only be calculated by
170          * subtracting a hardware counter from error packets received by
171          * the driver. Note: this causes transient inaccuracies in the
172          * ipackets count. Also, the lengths of truncated packets are
173          * counted in ibytes even though truncated packets are dropped
174          * which can make ibytes be slightly higher than it should be.
175          */
176         rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors);
177         rx_truncated = rx_packet_errors - stats->rx.rx_errors;
178
179         r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated;
180         r_stats->opackets = stats->tx.tx_frames_ok;
181
182         r_stats->ibytes = stats->rx.rx_bytes_ok;
183         r_stats->obytes = stats->tx.tx_bytes_ok;
184
185         r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
186         r_stats->oerrors = stats->tx.tx_errors;
187
188         r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;
189
190         r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
191 }
192
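/* Remove the MAC address stored at mac_index in the port's address array
 * from the adapter's MAC filter. A failure from the firmware is only logged.
 */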
193 void enic_del_mac_address(struct enic *enic, int mac_index)
194 {
195         struct rte_eth_dev *eth_dev = enic->rte_dev;
196         uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes;
197
198         if (vnic_dev_del_addr(enic->vdev, mac_addr))
199                 dev_err(enic, "del mac addr failed\n");
200 }
201
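/* Add a unicast MAC address to the adapter's MAC filter, after checking
 * that it is neither a multicast nor an all-zero address.
 */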
202 void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
203 {
204         int err;
205
206         if (!is_eth_addr_valid(mac_addr)) {
207                 dev_err(enic, "invalid mac address\n");
208                 return;
209         }
210
211         err = vnic_dev_add_addr(enic->vdev, mac_addr);
212         if (err) {
213                 dev_err(enic, "add mac addr failed\n");
214                 return;
215         }
216 }
217
218 static void
219 enic_free_rq_buf(struct rte_mbuf **mbuf)
220 {
221         if (*mbuf == NULL)
222                 return;
223
224         rte_pktmbuf_free(*mbuf);
225         *mbuf = NULL;
226 }
227
228 void enic_init_vnic_resources(struct enic *enic)
229 {
230         unsigned int error_interrupt_enable = 1;
231         unsigned int error_interrupt_offset = 0;
232         unsigned int index = 0;
233         unsigned int cq_idx;
234         struct vnic_rq *data_rq;
235
236         for (index = 0; index < enic->rq_count; index++) {
237                 cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index));
238
239                 vnic_rq_init(&enic->rq[enic_rte_rq_idx_to_sop_idx(index)],
240                         cq_idx,
241                         error_interrupt_enable,
242                         error_interrupt_offset);
243
244                 data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index)];
245                 if (data_rq->in_use)
246                         vnic_rq_init(data_rq,
247                                      cq_idx,
248                                      error_interrupt_enable,
249                                      error_interrupt_offset);
250
251                 vnic_cq_init(&enic->cq[cq_idx],
252                         0 /* flow_control_enable */,
253                         1 /* color_enable */,
254                         0 /* cq_head */,
255                         0 /* cq_tail */,
256                         1 /* cq_tail_color */,
257                         0 /* interrupt_enable */,
258                         1 /* cq_entry_enable */,
259                         0 /* cq_message_enable */,
260                         0 /* interrupt offset */,
261                         0 /* cq_message_addr */);
262         }
263
264         for (index = 0; index < enic->wq_count; index++) {
265                 vnic_wq_init(&enic->wq[index],
266                         enic_cq_wq(enic, index),
267                         error_interrupt_enable,
268                         error_interrupt_offset);
269
270                 cq_idx = enic_cq_wq(enic, index);
271                 vnic_cq_init(&enic->cq[cq_idx],
272                         0 /* flow_control_enable */,
273                         1 /* color_enable */,
274                         0 /* cq_head */,
275                         0 /* cq_tail */,
276                         1 /* cq_tail_color */,
277                         0 /* interrupt_enable */,
278                         0 /* cq_entry_enable */,
279                         1 /* cq_message_enable */,
280                         0 /* interrupt offset */,
281                         (u64)enic->wq[index].cqmsg_rz->phys_addr);
282         }
283
284         vnic_intr_init(&enic->intr,
285                 enic->config.intr_timer_usec,
286                 enic->config.intr_timer_type,
287                 /*mask_on_assertion*/1);
288 }
289
290
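/* Fill an RQ with mbufs from its mempool: allocate one mbuf per descriptor,
 * encode its DMA address and SOP/non-SOP type into the descriptor, then
 * post all but the last buffer to the VIC.
 */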
291 static int
292 enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
293 {
294         struct rte_mbuf *mb;
295         struct rq_enet_desc *rqd = rq->ring.descs;
296         unsigned i;
297         dma_addr_t dma_addr;
298
299         if (!rq->in_use)
300                 return 0;
301
302         dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
303                   rq->ring.desc_count);
304
305         for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
306                 mb = rte_mbuf_raw_alloc(rq->mp);
307                 if (mb == NULL) {
308                         dev_err(enic, "RX mbuf alloc failed queue_id=%u\n",
309                         (unsigned)rq->index);
310                         return -ENOMEM;
311                 }
312
313                 mb->data_off = RTE_PKTMBUF_HEADROOM;
314                 dma_addr = (dma_addr_t)(mb->buf_physaddr
315                            + RTE_PKTMBUF_HEADROOM);
316                 rq_enet_desc_enc(rqd, dma_addr,
317                                 (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
318                                 : RQ_ENET_TYPE_NOT_SOP),
319                                 mb->buf_len - RTE_PKTMBUF_HEADROOM);
320                 rq->mbuf_ring[i] = mb;
321         }
322
323         /* make sure all prior writes are complete before doing the PIO write */
324         rte_rmb();
325
326         /* Post all but the last buffer to VIC. */
327         rq->posted_index = rq->ring.desc_count - 1;
328
329         rq->rx_nb_hold = 0;
330
331         dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
332                 enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
333         iowrite32(rq->posted_index, &rq->ctrl->posted_index);
334         iowrite32(0, &rq->ctrl->fetch_index);
335         rte_rmb();
336
337         return 0;
338
339 }
340
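/* DMA-consistent allocation callback registered with the vNIC layer. Memory
 * is carved from an rte_memzone and each allocation is tracked on
 * enic->memzone_list so enic_free_consistent() can locate and release it.
 */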
341 static void *
342 enic_alloc_consistent(void *priv, size_t size,
343         dma_addr_t *dma_handle, u8 *name)
344 {
345         void *vaddr;
346         const struct rte_memzone *rz;
347         *dma_handle = 0;
348         struct enic *enic = (struct enic *)priv;
349         struct enic_memzone_entry *mze;
350
351         rz = rte_memzone_reserve_aligned((const char *)name,
352                                          size, SOCKET_ID_ANY, 0, ENIC_ALIGN);
353         if (!rz) {
354                 pr_err("%s : Failed to allocate memory requested for %s\n",
355                         __func__, name);
356                 return NULL;
357         }
358
359         vaddr = rz->addr;
360         *dma_handle = (dma_addr_t)rz->phys_addr;
361
362         mze = rte_malloc("enic memzone entry",
363                          sizeof(struct enic_memzone_entry), 0);
364
365         if (!mze) {
366                 pr_err("%s : Failed to allocate memory for memzone list\n",
367                        __func__);
368                 rte_memzone_free(rz);
                    return NULL;
369         }
370
371         mze->rz = rz;
372
373         rte_spinlock_lock(&enic->memzone_list_lock);
374         LIST_INSERT_HEAD(&enic->memzone_list, mze, entries);
375         rte_spinlock_unlock(&enic->memzone_list_lock);
376
377         return vaddr;
378 }
379
380 static void
381 enic_free_consistent(void *priv,
382                      __rte_unused size_t size,
383                      void *vaddr,
384                      dma_addr_t dma_handle)
385 {
386         struct enic_memzone_entry *mze;
387         struct enic *enic = (struct enic *)priv;
388
389         rte_spinlock_lock(&enic->memzone_list_lock);
390         LIST_FOREACH(mze, &enic->memzone_list, entries) {
391                 if (mze->rz->addr == vaddr &&
392                     mze->rz->phys_addr == dma_handle)
393                         break;
394         }
395         if (mze == NULL) {
396                 rte_spinlock_unlock(&enic->memzone_list_lock);
397                 dev_warning(enic,
398                             "Tried to free memory, but couldn't find it in the memzone list\n");
399                 return;
400         }
401         LIST_REMOVE(mze, entries);
402         rte_spinlock_unlock(&enic->memzone_list_lock);
403         rte_memzone_free(mze->rz);
404         rte_free(mze);
405 }
406
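/* Refresh the cached link status, speed and duplex in the ethdev data.
 * Returns nonzero when the link status is unchanged.
 */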
407 int enic_link_update(struct enic *enic)
408 {
409         struct rte_eth_dev *eth_dev = enic->rte_dev;
410         int ret;
411         int link_status = 0;
412
413         link_status = enic_get_link_status(enic);
414         ret = (link_status == enic->link_status);
415         enic->link_status = link_status;
416         eth_dev->data->dev_link.link_status = link_status;
417         eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
418         eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
419         return ret;
420 }
421
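/* Handler for the error/link-status interrupt (vector 0): return interrupt
 * credits, refresh link status, fire the LSC callback and log any WQ/RQ
 * error status.
 */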
422 static void
423 enic_intr_handler(__rte_unused struct rte_intr_handle *handle,
424         void *arg)
425 {
426         struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
427         struct enic *enic = pmd_priv(dev);
428
429         vnic_intr_return_all_credits(&enic->intr);
430
431         enic_link_update(enic);
432         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
433         enic_log_q_error(enic);
434 }
435
436 int enic_enable(struct enic *enic)
437 {
438         unsigned int index;
439         int err;
440         struct rte_eth_dev *eth_dev = enic->rte_dev;
441
442         eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
443         eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
444
445         /* vnic notification of link status has already been turned on in
446          * enic_dev_init() which is called during probe time.  Here we are
447          * just turning on interrupt vector 0 if needed.
448          */
449         if (eth_dev->data->dev_conf.intr_conf.lsc)
450                 vnic_dev_notify_set(enic->vdev, 0);
451
452         if (enic_clsf_init(enic))
453                 dev_warning(enic, "Init of hash table for clsf failed."\
454                         " Flow director feature will not work\n");
455
456         for (index = 0; index < enic->rq_count; index++) {
457                 err = enic_alloc_rx_queue_mbufs(enic,
458                         &enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
459                 if (err) {
460                         dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
461                         return err;
462                 }
463                 err = enic_alloc_rx_queue_mbufs(enic,
464                         &enic->rq[enic_rte_rq_idx_to_data_idx(index)]);
465                 if (err) {
466                         /* release the allocated mbufs for the sop rq*/
467                         enic_rxmbuf_queue_release(enic,
468                                 &enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
469
470                         dev_err(enic, "Failed to alloc data RX queue mbufs\n");
471                         return err;
472                 }
473         }
474
475         for (index = 0; index < enic->wq_count; index++)
476                 enic_start_wq(enic, index);
477         for (index = 0; index < enic->rq_count; index++)
478                 enic_start_rq(enic, index);
479
480         vnic_dev_add_addr(enic->vdev, enic->mac_addr);
481
482         vnic_dev_enable_wait(enic->vdev);
483
484         /* Register and enable error interrupt */
485         rte_intr_callback_register(&(enic->pdev->intr_handle),
486                 enic_intr_handler, (void *)enic->rte_dev);
487
488         rte_intr_enable(&(enic->pdev->intr_handle));
489         vnic_intr_unmask(&enic->intr);
490
491         return 0;
492 }
493
494 int enic_alloc_intr_resources(struct enic *enic)
495 {
496         int err;
497
498         dev_info(enic, "vNIC resources used:  "\
499                 "wq %d rq %d cq %d intr %d\n",
500                 enic->wq_count, enic_vnic_rq_count(enic),
501                 enic->cq_count, enic->intr_count);
502
503         err = vnic_intr_alloc(enic->vdev, &enic->intr, 0);
504         if (err)
505                 enic_free_vnic_resources(enic);
506
507         return err;
508 }
509
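/* Free an Rx queue: release the mbufs and mbuf rings of the SOP RQ and,
 * when scatter Rx is in use, its companion data RQ, then free both RQs
 * and the associated CQ.
 */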
510 void enic_free_rq(void *rxq)
511 {
512         struct vnic_rq *rq_sop, *rq_data;
513         struct enic *enic;
514
515         if (rxq == NULL)
516                 return;
517
518         rq_sop = (struct vnic_rq *)rxq;
519         enic = vnic_dev_priv(rq_sop->vdev);
520         rq_data = &enic->rq[rq_sop->data_queue_idx];
521
522         enic_rxmbuf_queue_release(enic, rq_sop);
523         if (rq_data->in_use)
524                 enic_rxmbuf_queue_release(enic, rq_data);
525
526         rte_free(rq_sop->mbuf_ring);
527         if (rq_data->in_use)
528                 rte_free(rq_data->mbuf_ring);
529
530         rq_sop->mbuf_ring = NULL;
531         rq_data->mbuf_ring = NULL;
532
533         vnic_rq_free(rq_sop);
534         if (rq_data->in_use)
535                 vnic_rq_free(rq_data);
536
537         vnic_cq_free(&enic->cq[enic_sop_rq_idx_to_cq_idx(rq_sop->index)]);
538
539         rq_sop->in_use = 0;
540         rq_data->in_use = 0;
541 }
542
543 void enic_start_wq(struct enic *enic, uint16_t queue_idx)
544 {
545         struct rte_eth_dev *eth_dev = enic->rte_dev;
546         vnic_wq_enable(&enic->wq[queue_idx]);
547         eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
548 }
549
550 int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
551 {
552         struct rte_eth_dev *eth_dev = enic->rte_dev;
553         int ret;
554
555         ret = vnic_wq_disable(&enic->wq[queue_idx]);
556         if (ret)
557                 return ret;
558
559         eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
560         return 0;
561 }
562
563 void enic_start_rq(struct enic *enic, uint16_t queue_idx)
564 {
565         struct vnic_rq *rq_sop;
566         struct vnic_rq *rq_data;
567         rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
568         rq_data = &enic->rq[rq_sop->data_queue_idx];
569         struct rte_eth_dev *eth_dev = enic->rte_dev;
570
571         if (rq_data->in_use)
572                 vnic_rq_enable(rq_data);
573         rte_mb();
574         vnic_rq_enable(rq_sop);
575         eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
576 }
577
578 int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
579 {
580         int ret1 = 0, ret2 = 0;
581         struct rte_eth_dev *eth_dev = enic->rte_dev;
582         struct vnic_rq *rq_sop;
583         struct vnic_rq *rq_data;
584         rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
585         rq_data = &enic->rq[rq_sop->data_queue_idx];
586
587         ret2 = vnic_rq_disable(rq_sop);
588         rte_mb();
589         if (rq_data->in_use)
590                 ret1 = vnic_rq_disable(rq_data);
591
592         if (ret2)
593                 return ret2;
594         else if (ret1)
595                 return ret1;
596
597         eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
598         return 0;
599 }
600
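/* Allocate the vNIC resources for one application Rx queue. Each Rx queue
 * is backed by a start-of-packet (SOP) RQ and, when scatter Rx is enabled
 * and the MTU does not fit in a single mbuf, a second "data" RQ for the
 * trailing segments. Descriptor counts must be multiples of 32.
 */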
601 int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
602         unsigned int socket_id, struct rte_mempool *mp,
603         uint16_t nb_desc, uint16_t free_thresh)
604 {
605         int rc;
606         uint16_t sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
607         uint16_t data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx);
608         struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
609         struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
610         unsigned int mbuf_size, mbufs_per_pkt;
611         unsigned int nb_sop_desc, nb_data_desc;
612         uint16_t min_sop, max_sop, min_data, max_data;
613         uint16_t mtu = enic->rte_dev->data->mtu;
614
615         rq_sop->is_sop = 1;
616         rq_sop->data_queue_idx = data_queue_idx;
617         rq_data->is_sop = 0;
618         rq_data->data_queue_idx = 0;
619         rq_sop->socket_id = socket_id;
620         rq_sop->mp = mp;
621         rq_data->socket_id = socket_id;
622         rq_data->mp = mp;
623         rq_sop->in_use = 1;
624         rq_sop->rx_free_thresh = free_thresh;
625         rq_data->rx_free_thresh = free_thresh;
626         dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
627                   free_thresh);
628
629         mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
630                                RTE_PKTMBUF_HEADROOM);
631
632         if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
633                 dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
634                 /* ceil((mtu + ETHER_HDR_LEN + 4)/mbuf_size) */
635                 mbufs_per_pkt = ((mtu + ETHER_HDR_LEN + 4) +
636                                  (mbuf_size - 1)) / mbuf_size;
637         } else {
638                 dev_info(enic, "Scatter rx mode disabled\n");
639                 mbufs_per_pkt = 1;
640         }
641
642         if (mbufs_per_pkt > 1) {
643                 dev_info(enic, "Rq %u Scatter rx mode in use\n", queue_idx);
644                 rq_sop->data_queue_enable = 1;
645                 rq_data->in_use = 1;
646         } else {
647                 dev_info(enic, "Rq %u Scatter rx mode not being used\n",
648                          queue_idx);
649                 rq_sop->data_queue_enable = 0;
650                 rq_data->in_use = 0;
651         }
652
653         /* number of descriptors have to be a multiple of 32 */
654         nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;
655         nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;
656
657         rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
658         rq_data->max_mbufs_per_pkt = mbufs_per_pkt;
659
660         if (mbufs_per_pkt > 1) {
661                 min_sop = 64;
662                 max_sop = ((enic->config.rq_desc_count /
663                             (mbufs_per_pkt - 1)) & ~0x1F);
664                 min_data = min_sop * (mbufs_per_pkt - 1);
665                 max_data = enic->config.rq_desc_count;
666         } else {
667                 min_sop = 64;
668                 max_sop = enic->config.rq_desc_count;
669                 min_data = 0;
670                 max_data = 0;
671         }
672
673         if (nb_desc < (min_sop + min_data)) {
674                 dev_warning(enic,
675                             "Number of rx descs too low, adjusting to minimum\n");
676                 nb_sop_desc = min_sop;
677                 nb_data_desc = min_data;
678         } else if (nb_desc > (max_sop + max_data)) {
679                 dev_warning(enic,
680                             "Number of rx_descs too high, adjusting to maximum\n");
681                 nb_sop_desc = max_sop;
682                 nb_data_desc = max_data;
683         }
684         if (mbufs_per_pkt > 1) {
685                 dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n",
686                          mtu, mbuf_size, min_sop + min_data,
687                          max_sop + max_data);
688         }
689         dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
690                  nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc);
691
692         /* Allocate sop queue resources */
693         rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx,
694                 nb_sop_desc, sizeof(struct rq_enet_desc));
695         if (rc) {
696                 dev_err(enic, "error in allocation of sop rq\n");
697                 goto err_exit;
698         }
699         nb_sop_desc = rq_sop->ring.desc_count;
700
701         if (rq_data->in_use) {
702                 /* Allocate data queue resources */
703                 rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx,
704                                    nb_data_desc,
705                                    sizeof(struct rq_enet_desc));
706                 if (rc) {
707                         dev_err(enic, "error in allocation of data rq\n");
708                         goto err_free_rq_sop;
709                 }
710                 nb_data_desc = rq_data->ring.desc_count;
711         }
712         rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
713                            socket_id, nb_sop_desc + nb_data_desc,
714                            sizeof(struct cq_enet_rq_desc));
715         if (rc) {
716                 dev_err(enic, "error in allocation of cq for rq\n");
717                 goto err_free_rq_data;
718         }
719
720         /* Allocate the mbuf rings */
721         rq_sop->mbuf_ring = (struct rte_mbuf **)
722                 rte_zmalloc_socket("rq->mbuf_ring",
723                                    sizeof(struct rte_mbuf *) * nb_sop_desc,
724                                    RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
725         if (rq_sop->mbuf_ring == NULL)
726                 goto err_free_cq;
727
728         if (rq_data->in_use) {
729                 rq_data->mbuf_ring = (struct rte_mbuf **)
730                         rte_zmalloc_socket("rq->mbuf_ring",
731                                 sizeof(struct rte_mbuf *) * nb_data_desc,
732                                 RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
733                 if (rq_data->mbuf_ring == NULL)
734                         goto err_free_sop_mbuf;
735         }
736
737         rq_sop->tot_nb_desc = nb_desc; /* squirrel away for MTU update function */
738
739         return 0;
740
741 err_free_sop_mbuf:
742         rte_free(rq_sop->mbuf_ring);
743 err_free_cq:
744         /* cleanup on error */
745         vnic_cq_free(&enic->cq[queue_idx]);
746 err_free_rq_data:
747         if (rq_data->in_use)
748                 vnic_rq_free(rq_data);
749 err_free_rq_sop:
750         vnic_rq_free(rq_sop);
751 err_exit:
752         return -ENOMEM;
753 }
754
755 void enic_free_wq(void *txq)
756 {
757         struct vnic_wq *wq;
758         struct enic *enic;
759
760         if (txq == NULL)
761                 return;
762
763         wq = (struct vnic_wq *)txq;
764         enic = vnic_dev_priv(wq->vdev);
765         rte_memzone_free(wq->cqmsg_rz);
766         vnic_wq_free(wq);
767         vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
768 }
769
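/* Allocate the vNIC resources for one Tx queue: the WQ itself, its
 * completion queue, and a small memzone the VIC writes CQ messages into.
 */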
770 int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
771         unsigned int socket_id, uint16_t nb_desc)
772 {
773         int err;
774         struct vnic_wq *wq = &enic->wq[queue_idx];
775         unsigned int cq_index = enic_cq_wq(enic, queue_idx);
776         char name[NAME_MAX];
777         static int instance;
778
779         wq->socket_id = socket_id;
780         if (nb_desc) {
781                 if (nb_desc > enic->config.wq_desc_count) {
782                         dev_warning(enic,
783                                 "WQ %d - number of tx desc in cmd line (%d) "\
784                                 "is greater than that in the UCSM/CIMC adapter "\
785                                 "policy.  Applying the value in the adapter "\
786                                 "policy (%d)\n",
787                                 queue_idx, nb_desc, enic->config.wq_desc_count);
788                 } else if (nb_desc != enic->config.wq_desc_count) {
789                         enic->config.wq_desc_count = nb_desc;
790                         dev_info(enic,
791                                 "TX Queues - effective number of descs:%d\n",
792                                 nb_desc);
793                 }
794         }
795
796         /* Allocate queue resources */
797         err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
798                 enic->config.wq_desc_count,
799                 sizeof(struct wq_enet_desc));
800         if (err) {
801                 dev_err(enic, "error in allocation of wq\n");
802                 return err;
803         }
804
805         err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
806                 socket_id, enic->config.wq_desc_count,
807                 sizeof(struct cq_enet_wq_desc));
808         if (err) {
809                 vnic_wq_free(wq);
810                 dev_err(enic, "error in allocation of cq for wq\n");
                    return err;
811         }
812
813         /* setup up CQ message */
814         snprintf((char *)name, sizeof(name),
815                  "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx,
816                 instance++);
817
818         wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
819                                                    sizeof(uint32_t),
820                                                    SOCKET_ID_ANY, 0,
821                                                    ENIC_ALIGN);
822         if (!wq->cqmsg_rz)
823                 return -ENOMEM;
824
825         return err;
826 }
827
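/* Quiesce the device: mask and unregister the error interrupt, disable the
 * vNIC, tear down the flow classifier, drop the unicast MAC filter (except
 * on SR-IOV VFs), then disable all WQs/RQs and clean out their buffers and
 * completion queues.
 */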
828 int enic_disable(struct enic *enic)
829 {
830         unsigned int i;
831         int err;
832
833         vnic_intr_mask(&enic->intr);
834         (void)vnic_intr_masked(&enic->intr); /* flush write */
835         rte_intr_disable(&enic->pdev->intr_handle);
836         rte_intr_callback_unregister(&enic->pdev->intr_handle,
837                                      enic_intr_handler,
838                                      (void *)enic->rte_dev);
839
840         vnic_dev_disable(enic->vdev);
841
842         enic_clsf_destroy(enic);
843
844         if (!enic_is_sriov_vf(enic))
845                 vnic_dev_del_addr(enic->vdev, enic->mac_addr);
846
847         for (i = 0; i < enic->wq_count; i++) {
848                 err = vnic_wq_disable(&enic->wq[i]);
849                 if (err)
850                         return err;
851         }
852         for (i = 0; i < enic_vnic_rq_count(enic); i++) {
853                 if (enic->rq[i].in_use) {
854                         err = vnic_rq_disable(&enic->rq[i]);
855                         if (err)
856                                 return err;
857                 }
858         }
859
860         /* If we were using interrupts, set the interrupt vector to -1
861          * to disable interrupts.  We are not disabling link notifications,
862          * though, as we want the polling of link status to continue working.
863          */
864         if (enic->rte_dev->data->dev_conf.intr_conf.lsc)
865                 vnic_dev_notify_set(enic->vdev, -1);
866
867         vnic_dev_set_reset_flag(enic->vdev, 1);
868
869         for (i = 0; i < enic->wq_count; i++)
870                 vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
871
872         for (i = 0; i < enic_vnic_rq_count(enic); i++)
873                 if (enic->rq[i].in_use)
874                         vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
875         for (i = 0; i < enic->cq_count; i++)
876                 vnic_cq_clean(&enic->cq[i]);
877         vnic_intr_clean(&enic->intr);
878
879         return 0;
880 }
881
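/* Issue a devcmd via start() and poll finished() once a millisecond for up
 * to two seconds, returning -ETIMEDOUT if it never completes.
 */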
882 static int enic_dev_wait(struct vnic_dev *vdev,
883         int (*start)(struct vnic_dev *, int),
884         int (*finished)(struct vnic_dev *, int *),
885         int arg)
886 {
887         int done;
888         int err;
889         int i;
890
891         err = start(vdev, arg);
892         if (err)
893                 return err;
894
895         /* Wait for func to complete...2 seconds max */
896         for (i = 0; i < 2000; i++) {
897                 err = finished(vdev, &done);
898                 if (err)
899                         return err;
900                 if (done)
901                         return 0;
902                 usleep(1000);
903         }
904         return -ETIMEDOUT;
905 }
906
907 static int enic_dev_open(struct enic *enic)
908 {
909         int err;
910
911         err = enic_dev_wait(enic->vdev, vnic_dev_open,
912                 vnic_dev_open_done, 0);
913         if (err)
914                 dev_err(enic_get_dev(enic),
915                         "vNIC device open failed, err %d\n", err);
916
917         return err;
918 }
919
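/* Program the default RSS hash key into the NIC: copy the key into a
 * DMA-consistent buffer, hand it to the firmware, then free the buffer.
 */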
920 static int enic_set_rsskey(struct enic *enic)
921 {
922         dma_addr_t rss_key_buf_pa;
923         union vnic_rss_key *rss_key_buf_va = NULL;
924         static union vnic_rss_key rss_key = {
925                 .key = {
926                         [0] = {.b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}},
927                         [1] = {.b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101}},
928                         [2] = {.b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115}},
929                         [3] = {.b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108}},
930                 }
931         };
932         int err;
933         u8 name[NAME_MAX];
934
935         snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name);
936         rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
937                 &rss_key_buf_pa, name);
938         if (!rss_key_buf_va)
939                 return -ENOMEM;
940
941         rte_memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));
942
943         err = enic_set_rss_key(enic,
944                 rss_key_buf_pa,
945                 sizeof(union vnic_rss_key));
946
947         enic_free_consistent(enic, sizeof(union vnic_rss_key),
948                 rss_key_buf_va, rss_key_buf_pa);
949
950         return err;
951 }
952
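/* Program the RSS indirection table so that the (1 << rss_hash_bits) hash
 * buckets are spread round-robin across the configured SOP RQs.
 */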
953 static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
954 {
955         dma_addr_t rss_cpu_buf_pa;
956         union vnic_rss_cpu *rss_cpu_buf_va = NULL;
957         int i;
958         int err;
959         u8 name[NAME_MAX];
960
961         snprintf((char *)name, NAME_MAX, "rss_cpu-%s", enic->bdf_name);
962         rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),
963                 &rss_cpu_buf_pa, name);
964         if (!rss_cpu_buf_va)
965                 return -ENOMEM;
966
967         for (i = 0; i < (1 << rss_hash_bits); i++)
968                 (*rss_cpu_buf_va).cpu[i / 4].b[i % 4] =
969                         enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);
970
971         err = enic_set_rss_cpu(enic,
972                 rss_cpu_buf_pa,
973                 sizeof(union vnic_rss_cpu));
974
975         enic_free_consistent(enic, sizeof(union vnic_rss_cpu),
976                 rss_cpu_buf_va, rss_cpu_buf_pa);
977
978         return err;
979 }
980
981 static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
982         u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
983 {
984         const u8 tso_ipid_split_en = 0;
985         int err;
986
987         /* Enable VLAN tag stripping */
988
989         err = enic_set_nic_cfg(enic,
990                 rss_default_cpu, rss_hash_type,
991                 rss_hash_bits, rss_base_cpu,
992                 rss_enable, tso_ipid_split_en,
993                 enic->ig_vlan_strip_en);
994
995         return err;
996 }
997
998 int enic_set_rss_nic_cfg(struct enic *enic)
999 {
1000         const u8 rss_default_cpu = 0;
1001         const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
1002             NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
1003             NIC_CFG_RSS_HASH_TYPE_IPV6 |
1004             NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
1005         const u8 rss_hash_bits = 7;
1006         const u8 rss_base_cpu = 0;
1007         u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
1008
1009         if (rss_enable) {
1010                 if (!enic_set_rsskey(enic)) {
1011                         if (enic_set_rsscpu(enic, rss_hash_bits)) {
1012                                 rss_enable = 0;
1013                                 dev_warning(enic, "RSS disabled, "\
1014                                         "Failed to set RSS cpu indirection table.");
1015                         }
1016                 } else {
1017                         rss_enable = 0;
1018                         dev_warning(enic,
1019                                 "RSS disabled, Failed to set RSS key.\n");
1020                 }
1021         }
1022
1023         return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
1024                 rss_hash_bits, rss_base_cpu, rss_enable);
1025 }
1026
1027 int enic_setup_finish(struct enic *enic)
1028 {
1029         int ret;
1030
1031         enic_init_soft_stats(enic);
1032
1033         ret = enic_set_rss_nic_cfg(enic);
1034         if (ret) {
1035                 dev_err(enic, "Failed to config nic, aborting.\n");
1036                 return -1;
1037         }
1038
1039         /* Default conf */
1040         vnic_dev_packet_filter(enic->vdev,
1041                 1 /* directed  */,
1042                 1 /* multicast */,
1043                 1 /* broadcast */,
1044                 0 /* promisc   */,
1045                 1 /* allmulti  */);
1046
1047         enic->promisc = 0;
1048         enic->allmulti = 1;
1049
1050         return 0;
1051 }
1052
1053 void enic_add_packet_filter(struct enic *enic)
1054 {
1055         /* Args -> directed, multicast, broadcast, promisc, allmulti */
1056         vnic_dev_packet_filter(enic->vdev, 1, 1, 1,
1057                 enic->promisc, enic->allmulti);
1058 }
1059
1060 int enic_get_link_status(struct enic *enic)
1061 {
1062         return vnic_dev_link_status(enic->vdev);
1063 }
1064
1065 static void enic_dev_deinit(struct enic *enic)
1066 {
1067         struct rte_eth_dev *eth_dev = enic->rte_dev;
1068
1069         /* stop link status checking */
1070         vnic_dev_notify_unset(enic->vdev);
1071
1072         rte_free(eth_dev->data->mac_addrs);
1073 }
1074
1075
1076 int enic_set_vnic_res(struct enic *enic)
1077 {
1078         struct rte_eth_dev *eth_dev = enic->rte_dev;
1079         int rc = 0;
1080
1081         /* With Rx scatter support, two RQs are now used per RQ used by
1082          * the application.
1083          */
1084         if (enic->conf_rq_count < eth_dev->data->nb_rx_queues) {
1085                 dev_err(enic, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
1086                         eth_dev->data->nb_rx_queues,
1087                         eth_dev->data->nb_rx_queues * 2, enic->conf_rq_count);
1088                 rc = -EINVAL;
1089         }
1090         if (enic->conf_wq_count < eth_dev->data->nb_tx_queues) {
1091                 dev_err(enic, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
1092                         eth_dev->data->nb_tx_queues, enic->conf_wq_count);
1093                 rc = -EINVAL;
1094         }
1095
1096         if (enic->conf_cq_count < (eth_dev->data->nb_rx_queues +
1097                                    eth_dev->data->nb_tx_queues)) {
1098                 dev_err(enic, "Not enough Completion queues. Required:%u, Configured:%u\n",
1099                         (eth_dev->data->nb_rx_queues +
1100                          eth_dev->data->nb_tx_queues), enic->conf_cq_count);
1101                 rc = -EINVAL;
1102         }
1103
1104         if (rc == 0) {
1105                 enic->rq_count = eth_dev->data->nb_rx_queues;
1106                 enic->wq_count = eth_dev->data->nb_tx_queues;
1107                 enic->cq_count = enic->rq_count + enic->wq_count;
1108         }
1109
1110         return rc;
1111 }
1112
1113 /* Reinitialize an Rx queue's CQ and SOP/data RQs and refill their mbufs */
1114 static int
1115 enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
1116 {
1117         struct vnic_rq *sop_rq, *data_rq;
1118         unsigned int cq_idx = enic_cq_rq(enic, rq_idx);
1119         int rc = 0;
1120
1121         sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
1122         data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];
1123
1124         vnic_cq_clean(&enic->cq[cq_idx]);
1125         vnic_cq_init(&enic->cq[cq_idx],
1126                      0 /* flow_control_enable */,
1127                      1 /* color_enable */,
1128                      0 /* cq_head */,
1129                      0 /* cq_tail */,
1130                      1 /* cq_tail_color */,
1131                      0 /* interrupt_enable */,
1132                      1 /* cq_entry_enable */,
1133                      0 /* cq_message_enable */,
1134                      0 /* interrupt offset */,
1135                      0 /* cq_message_addr */);
1136
1137
1138         vnic_rq_init_start(sop_rq, enic_cq_rq(enic,
1139                            enic_rte_rq_idx_to_sop_idx(rq_idx)), 0,
1140                            sop_rq->ring.desc_count - 1, 1, 0);
1141         if (data_rq->in_use) {
1142                 vnic_rq_init_start(data_rq,
1143                                    enic_cq_rq(enic,
1144                                    enic_rte_rq_idx_to_data_idx(rq_idx)), 0,
1145                                    data_rq->ring.desc_count - 1, 1, 0);
1146         }
1147
1148         rc = enic_alloc_rx_queue_mbufs(enic, sop_rq);
1149         if (rc)
1150                 return rc;
1151
1152         if (data_rq->in_use) {
1153                 rc = enic_alloc_rx_queue_mbufs(enic, data_rq);
1154                 if (rc) {
1155                         enic_rxmbuf_queue_release(enic, sop_rq);
1156                         return rc;
1157                 }
1158         }
1159
1160         return 0;
1161 }
1162
1163 /* The Cisco NIC can send and receive packets up to a max packet size
1164  * determined by the NIC type and firmware. There is also an MTU
1165  * configured into the NIC via the CIMC/UCSM management interface
1166  * which can be overridden by this function (up to the max packet size).
1167  * Depending on the network setup, doing so may cause packet drops
1168  * and unexpected behavior.
1169  */
1170 int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
1171 {
1172         unsigned int rq_idx;
1173         struct vnic_rq *rq;
1174         int rc = 0;
1175         uint16_t old_mtu;       /* previous setting */
1176         uint16_t config_mtu;    /* Value configured into NIC via CIMC/UCSM */
1177         struct rte_eth_dev *eth_dev = enic->rte_dev;
1178
1179         old_mtu = eth_dev->data->mtu;
1180         config_mtu = enic->config.mtu;
1181
1182         if (new_mtu > enic->max_mtu) {
1183                 dev_err(enic,
1184                         "MTU not updated: requested (%u) greater than max (%u)\n",
1185                         new_mtu, enic->max_mtu);
1186                 return -EINVAL;
1187         }
1188         if (new_mtu < ENIC_MIN_MTU) {
1189                 dev_info(enic,
1190                         "MTU not updated: requested (%u) less than min (%u)\n",
1191                         new_mtu, ENIC_MIN_MTU);
1192                 return -EINVAL;
1193         }
1194         if (new_mtu > config_mtu)
1195                 dev_warning(enic,
1196                         "MTU (%u) is greater than value configured in NIC (%u)\n",
1197                         new_mtu, config_mtu);
1198
1199         /* The easy case is when scatter is disabled. However if the MTU
1200          * becomes greater than the mbuf data size, packet drops will ensue.
1201          */
1202         if (!enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
1203                 eth_dev->data->mtu = new_mtu;
1204                 goto set_mtu_done;
1205         }
1206
1207         /* Rx scatter is enabled so reconfigure the RQs on the fly. The point is to
1208          * change Rx scatter mode if necessary for better performance. I.e. if
1209          * MTU was greater than the mbuf size and now it's less, scatter Rx
1210          * doesn't have to be used and vice versa.
1211          */
1212         rte_spinlock_lock(&enic->mtu_lock);
1213
1214         /* Stop traffic on all RQs */
1215         for (rq_idx = 0; rq_idx < enic->rq_count * 2; rq_idx++) {
1216                 rq = &enic->rq[rq_idx];
1217                 if (rq->is_sop && rq->in_use) {
1218                         rc = enic_stop_rq(enic,
1219                                           enic_sop_rq_idx_to_rte_idx(rq_idx));
1220                         if (rc) {
1221                                 dev_err(enic, "Failed to stop Rq %u\n", rq_idx);
1222                                 goto set_mtu_done;
1223                         }
1224                 }
1225         }
1226
1227         /* replace Rx function with a no-op to avoid getting stale pkts */
1228         eth_dev->rx_pkt_burst = enic_dummy_recv_pkts;
1229         rte_mb();
1230
1231         /* Allow time for threads to exit the real Rx function. */
1232         usleep(100000);
1233
1234         /* now it is safe to reconfigure the RQs */
1235
1236         /* update the mtu */
1237         eth_dev->data->mtu = new_mtu;
1238
1239         /* free and reallocate RQs with the new MTU */
1240         for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
1241                 rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
1242
1243                 enic_free_rq(rq);
1244                 rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
1245                                    rq->tot_nb_desc, rq->rx_free_thresh);
1246                 if (rc) {
1247                         dev_err(enic,
1248                                 "Fatal MTU alloc error- No traffic will pass\n");
1249                         goto set_mtu_done;
1250                 }
1251
1252                 rc = enic_reinit_rq(enic, rq_idx);
1253                 if (rc) {
1254                         dev_err(enic,
1255                                 "Fatal MTU RQ reinit- No traffic will pass\n");
1256                         goto set_mtu_done;
1257                 }
1258         }
1259
1260         /* put back the real receive function */
1261         rte_mb();
1262         eth_dev->rx_pkt_burst = enic_recv_pkts;
1263         rte_mb();
1264
1265         /* restart Rx traffic */
1266         for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
1267                 rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
1268                 if (rq->is_sop && rq->in_use)
1269                         enic_start_rq(enic, rq_idx);
1270         }
1271
1272 set_mtu_done:
1273         dev_info(enic, "MTU changed from %u to %u\n",  old_mtu, new_mtu);
1274         rte_spinlock_unlock(&enic->mtu_lock);
1275         return rc;
1276 }
1277
1278 static int enic_dev_init(struct enic *enic)
1279 {
1280         int err;
1281         struct rte_eth_dev *eth_dev = enic->rte_dev;
1282
1283         vnic_dev_intr_coal_timer_info_default(enic->vdev);
1284
1285         /* Get vNIC configuration
1286          */
1287         err = enic_get_vnic_config(enic);
1288         if (err) {
1289                 dev_err(enic, "Get vNIC configuration failed, aborting\n");
1290                 return err;
1291         }
1292
1293         /* Get available resource counts */
1294         enic_get_res_counts(enic);
1295         if (enic->conf_rq_count == 1) {
1296                 dev_err(enic, "Running with only 1 RQ configured in the vNIC is not supported.\n");
1297                 dev_err(enic, "Please configure 2 RQs in the vNIC for each Rx queue used by DPDK.\n");
1298                 dev_err(enic, "See the ENIC PMD guide for more information.\n");
1299                 return -EINVAL;
1300         }
1301
1302         /* Get the supported filters */
1303         enic_fdir_info(enic);
1304
1305         eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN
1306                                                 * ENIC_MAX_MAC_ADDR, 0);
1307         if (!eth_dev->data->mac_addrs) {
1308                 dev_err(enic, "mac addr storage alloc failed, aborting.\n");
1309                 return -1;
1310         }
1311         ether_addr_copy((struct ether_addr *) enic->mac_addr,
1312                         eth_dev->data->mac_addrs);
1313
1314         vnic_dev_set_reset_flag(enic->vdev, 0);
1315
1316         /* set up link status checking */
1317         vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
1318
1319         return 0;
1320
1321 }
1322
1323 int enic_probe(struct enic *enic)
1324 {
1325         struct rte_pci_device *pdev = enic->pdev;
1326         int err = -1;
1327
1328         dev_debug(enic, " Initializing ENIC PMD\n");
1329
1330         enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;
1331         enic->bar0.len = pdev->mem_resource[0].len;
1332
1333         /* Register vNIC device */
1334         enic->vdev = vnic_dev_register(NULL, enic, enic->pdev, &enic->bar0, 1);
1335         if (!enic->vdev) {
1336                 dev_err(enic, "vNIC registration failed, aborting\n");
1337                 goto err_out;
1338         }
1339
1340         LIST_INIT(&enic->memzone_list);
1341         rte_spinlock_init(&enic->memzone_list_lock);
1342
1343         vnic_register_cbacks(enic->vdev,
1344                 enic_alloc_consistent,
1345                 enic_free_consistent);
1346
1347         /* Issue device open to get device in known state */
1348         err = enic_dev_open(enic);
1349         if (err) {
1350                 dev_err(enic, "vNIC dev open failed, aborting\n");
1351                 goto err_out_unregister;
1352         }
1353
1354         /* Set ingress vlan rewrite mode before vnic initialization */
1355         err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
1356                 IG_VLAN_REWRITE_MODE_PASS_THRU);
1357         if (err) {
1358                 dev_err(enic,
1359                         "Failed to set ingress vlan rewrite mode, aborting.\n");
1360                 goto err_out_dev_close;
1361         }
1362
1363         /* Issue device init to initialize the vnic-to-switch link.
1364          * We'll start with carrier off and wait for link UP
1365          * notification later to turn on carrier.  We don't need
1366          * to wait here for the vnic-to-switch link initialization
1367          * to complete; link UP notification is the indication that
1368          * the process is complete.
1369          */
1370
1371         err = vnic_dev_init(enic->vdev, 0);
1372         if (err) {
1373                 dev_err(enic, "vNIC dev init failed, aborting\n");
1374                 goto err_out_dev_close;
1375         }
1376
1377         err = enic_dev_init(enic);
1378         if (err) {
1379                 dev_err(enic, "Device initialization failed, aborting\n");
1380                 goto err_out_dev_close;
1381         }
1382
1383         return 0;
1384
1385 err_out_dev_close:
1386         vnic_dev_close(enic->vdev);
1387 err_out_unregister:
1388         vnic_dev_unregister(enic->vdev);
1389 err_out:
1390         return err;
1391 }
1392
1393 void enic_remove(struct enic *enic)
1394 {
1395         enic_dev_deinit(enic);
1396         vnic_dev_close(enic->vdev);
1397         vnic_dev_unregister(enic->vdev);
1398 }