net/enic: remove remaining header-split code
drivers/net/enic/enic_main.c
/*
 * Copyright 2008-2014 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <stdio.h>

#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <libgen.h>

#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_ethdev.h>

#include "enic_compat.h"
#include "enic.h"
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_nic.h"

static inline int enic_is_sriov_vf(struct enic *enic)
{
        return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

static int is_zero_addr(uint8_t *addr)
{
        return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
}

static int is_mcast_addr(uint8_t *addr)
{
        return addr[0] & 1;
}

static int is_eth_addr_valid(uint8_t *addr)
{
        return !is_mcast_addr(addr) && !is_zero_addr(addr);
}

static void
enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
{
        uint16_t i;

        if (!rq || !rq->mbuf_ring) {
                dev_debug(enic, "Pointer to rq or mbuf_ring is NULL");
                return;
        }

        for (i = 0; i < rq->ring.desc_count; i++) {
                if (rq->mbuf_ring[i]) {
                        rte_pktmbuf_free_seg(rq->mbuf_ring[i]);
                        rq->mbuf_ring[i] = NULL;
                }
        }
}

static void enic_free_wq_buf(struct vnic_wq_buf *buf)
{
        struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;

        rte_pktmbuf_free_seg(mbuf);
        buf->mb = NULL;
}

static void enic_log_q_error(struct enic *enic)
{
        unsigned int i;
        u32 error_status;

        for (i = 0; i < enic->wq_count; i++) {
                error_status = vnic_wq_error_status(&enic->wq[i]);
                if (error_status)
                        dev_err(enic, "WQ[%d] error_status %d\n", i,
                                error_status);
        }

        for (i = 0; i < enic_vnic_rq_count(enic); i++) {
                if (!enic->rq[i].in_use)
                        continue;
                error_status = vnic_rq_error_status(&enic->rq[i]);
                if (error_status)
                        dev_err(enic, "RQ[%d] error_status %d\n", i,
                                error_status);
        }
}

static void enic_clear_soft_stats(struct enic *enic)
{
        struct enic_soft_stats *soft_stats = &enic->soft_stats;
        rte_atomic64_clear(&soft_stats->rx_nombuf);
        rte_atomic64_clear(&soft_stats->rx_packet_errors);
        rte_atomic64_clear(&soft_stats->tx_oversized);
}

static void enic_init_soft_stats(struct enic *enic)
{
        struct enic_soft_stats *soft_stats = &enic->soft_stats;
        rte_atomic64_init(&soft_stats->rx_nombuf);
        rte_atomic64_init(&soft_stats->rx_packet_errors);
        rte_atomic64_init(&soft_stats->tx_oversized);
        enic_clear_soft_stats(enic);
}

void enic_dev_stats_clear(struct enic *enic)
{
        if (vnic_dev_stats_clear(enic->vdev))
                dev_err(enic, "Error in clearing stats\n");
        enic_clear_soft_stats(enic);
}

int enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
{
        struct vnic_stats *stats;
        struct enic_soft_stats *soft_stats = &enic->soft_stats;
        int64_t rx_truncated;
        uint64_t rx_packet_errors;
        int ret = vnic_dev_stats_dump(enic->vdev, &stats);

        if (ret) {
                dev_err(enic, "Error in getting stats\n");
                return ret;
        }

        /* The number of truncated packets can only be calculated by
         * subtracting a hardware counter from error packets received by
         * the driver. Note: this causes transient inaccuracies in the
         * ipackets count. Also, the lengths of truncated packets are
         * counted in ibytes even though truncated packets are dropped,
         * which can make ibytes slightly higher than it should be.
         */
        rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors);
        rx_truncated = rx_packet_errors - stats->rx.rx_errors;

        r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated;
        r_stats->opackets = stats->tx.tx_frames_ok;

        r_stats->ibytes = stats->rx.rx_bytes_ok;
        r_stats->obytes = stats->tx.tx_bytes_ok;

        r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
        r_stats->oerrors = stats->tx.tx_errors
                           + rte_atomic64_read(&soft_stats->tx_oversized);

        r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;

        r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
        return 0;
}

void enic_del_mac_address(struct enic *enic, int mac_index)
{
        struct rte_eth_dev *eth_dev = enic->rte_dev;
        uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes;

        if (vnic_dev_del_addr(enic->vdev, mac_addr))
                dev_err(enic, "del mac addr failed\n");
}

int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
{
        int err;

        if (!is_eth_addr_valid(mac_addr)) {
                dev_err(enic, "invalid mac address\n");
                return -EINVAL;
        }

        err = vnic_dev_add_addr(enic->vdev, mac_addr);
        if (err)
                dev_err(enic, "add mac addr failed\n");
        return err;
}

static void
enic_free_rq_buf(struct rte_mbuf **mbuf)
{
        if (*mbuf == NULL)
                return;

        rte_pktmbuf_free(*mbuf);
        *mbuf = NULL;
}

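/*
 * Program the vNIC hardware resources from the current configuration: for
 * each Rx queue, the SOP RQ, the optional data RQ (used for scatter Rx)
 * and their shared completion queue; for each Tx queue, the WQ and a CQ
 * whose completion message address points at the per-WQ cqmsg memzone;
 * and finally the error/notification interrupt.
 */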
void enic_init_vnic_resources(struct enic *enic)
{
        unsigned int error_interrupt_enable = 1;
        unsigned int error_interrupt_offset = 0;
        unsigned int index = 0;
        unsigned int cq_idx;
        struct vnic_rq *data_rq;

        for (index = 0; index < enic->rq_count; index++) {
                cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index));

                vnic_rq_init(&enic->rq[enic_rte_rq_idx_to_sop_idx(index)],
                        cq_idx,
                        error_interrupt_enable,
                        error_interrupt_offset);

                data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index)];
                if (data_rq->in_use)
                        vnic_rq_init(data_rq,
                                     cq_idx,
                                     error_interrupt_enable,
                                     error_interrupt_offset);

                vnic_cq_init(&enic->cq[cq_idx],
                        0 /* flow_control_enable */,
                        1 /* color_enable */,
                        0 /* cq_head */,
                        0 /* cq_tail */,
                        1 /* cq_tail_color */,
                        0 /* interrupt_enable */,
                        1 /* cq_entry_enable */,
                        0 /* cq_message_enable */,
                        0 /* interrupt offset */,
                        0 /* cq_message_addr */);
        }

        for (index = 0; index < enic->wq_count; index++) {
                vnic_wq_init(&enic->wq[index],
                        enic_cq_wq(enic, index),
                        error_interrupt_enable,
                        error_interrupt_offset);

                cq_idx = enic_cq_wq(enic, index);
                vnic_cq_init(&enic->cq[cq_idx],
                        0 /* flow_control_enable */,
                        1 /* color_enable */,
                        0 /* cq_head */,
                        0 /* cq_tail */,
                        1 /* cq_tail_color */,
                        0 /* interrupt_enable */,
                        0 /* cq_entry_enable */,
                        1 /* cq_message_enable */,
                        0 /* interrupt offset */,
                        (u64)enic->wq[index].cqmsg_rz->iova);
        }

        vnic_intr_init(&enic->intr,
                enic->config.intr_timer_usec,
                enic->config.intr_timer_type,
                /*mask_on_assertion*/1);
}

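/*
 * Fill an RQ's descriptor ring with freshly allocated mbufs and post it to
 * the VIC. Each descriptor carries the mbuf's IOVA plus headroom and is
 * marked SOP or not-SOP according to the RQ type. The posted index is
 * written as desc_count - 1, i.e. one descriptor is held back, which
 * appears intended to keep the posted and fetch indexes from aliasing on a
 * completely full ring (an interpretation, not stated in the code).
 */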
static int
enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
{
        struct rte_mbuf *mb;
        struct rq_enet_desc *rqd = rq->ring.descs;
        unsigned i;
        dma_addr_t dma_addr;

        if (!rq->in_use)
                return 0;

        dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
                  rq->ring.desc_count);

        for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
                mb = rte_mbuf_raw_alloc(rq->mp);
                if (mb == NULL) {
                        dev_err(enic, "RX mbuf alloc failed queue_id=%u\n",
                                (unsigned)rq->index);
                        return -ENOMEM;
                }

                mb->data_off = RTE_PKTMBUF_HEADROOM;
                dma_addr = (dma_addr_t)(mb->buf_iova
                           + RTE_PKTMBUF_HEADROOM);
                rq_enet_desc_enc(rqd, dma_addr,
                                (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
                                : RQ_ENET_TYPE_NOT_SOP),
                                mb->buf_len - RTE_PKTMBUF_HEADROOM);
                rq->mbuf_ring[i] = mb;
        }

        /* make sure all prior writes are complete before doing the PIO write */
        rte_rmb();

        /* Post all but the last buffer to VIC. */
        rq->posted_index = rq->ring.desc_count - 1;

        rq->rx_nb_hold = 0;

        dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
                enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
        iowrite32(rq->posted_index, &rq->ctrl->posted_index);
        iowrite32(0, &rq->ctrl->fetch_index);
        rte_rmb();

        return 0;
}

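/*
 * DMA-coherent allocation callbacks registered with the vNIC library in
 * enic_probe(). Allocations are backed by rte_memzone reservations and
 * tracked on enic->memzone_list so that enic_free_consistent() can look
 * the zone up again from just the virtual address / IOVA pair.
 */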
static void *
enic_alloc_consistent(void *priv, size_t size,
        dma_addr_t *dma_handle, u8 *name)
{
        void *vaddr;
        const struct rte_memzone *rz;
        *dma_handle = 0;
        struct enic *enic = (struct enic *)priv;
        struct enic_memzone_entry *mze;

        rz = rte_memzone_reserve_aligned((const char *)name,
                                         size, SOCKET_ID_ANY, 0, ENIC_ALIGN);
        if (!rz) {
                pr_err("%s : Failed to allocate memory requested for %s\n",
                        __func__, name);
                return NULL;
        }

        vaddr = rz->addr;
        *dma_handle = (dma_addr_t)rz->iova;

        mze = rte_malloc("enic memzone entry",
                         sizeof(struct enic_memzone_entry), 0);

        if (!mze) {
                pr_err("%s : Failed to allocate memory for memzone list\n",
                       __func__);
                rte_memzone_free(rz);
                return NULL;
        }

        mze->rz = rz;

        rte_spinlock_lock(&enic->memzone_list_lock);
        LIST_INSERT_HEAD(&enic->memzone_list, mze, entries);
        rte_spinlock_unlock(&enic->memzone_list_lock);

        return vaddr;
}

static void
enic_free_consistent(void *priv,
                     __rte_unused size_t size,
                     void *vaddr,
                     dma_addr_t dma_handle)
{
        struct enic_memzone_entry *mze;
        struct enic *enic = (struct enic *)priv;

        rte_spinlock_lock(&enic->memzone_list_lock);
        LIST_FOREACH(mze, &enic->memzone_list, entries) {
                if (mze->rz->addr == vaddr &&
                    mze->rz->iova == dma_handle)
                        break;
        }
        if (mze == NULL) {
                rte_spinlock_unlock(&enic->memzone_list_lock);
                dev_warning(enic,
                            "Tried to free memory, but couldn't find it in the memzone list\n");
                return;
        }
        LIST_REMOVE(mze, entries);
        rte_spinlock_unlock(&enic->memzone_list_lock);
        rte_memzone_free(mze->rz);
        rte_free(mze);
}

int enic_link_update(struct enic *enic)
{
        struct rte_eth_dev *eth_dev = enic->rte_dev;
        int ret;
        int link_status = 0;

        link_status = enic_get_link_status(enic);
        ret = (link_status == enic->link_status);
        enic->link_status = link_status;
        eth_dev->data->dev_link.link_status = link_status;
        eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
        return ret;
}

static void
enic_intr_handler(void *arg)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
        struct enic *enic = pmd_priv(dev);

        vnic_intr_return_all_credits(&enic->intr);

        enic_link_update(enic);
        _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
        enic_log_q_error(enic);
}

int enic_enable(struct enic *enic)
{
        unsigned int index;
        int err;
        struct rte_eth_dev *eth_dev = enic->rte_dev;

        eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
        eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;

        /* vnic notification of link status has already been turned on in
         * enic_dev_init() which is called during probe time.  Here we are
         * just turning on interrupt vector 0 if needed.
         */
        if (eth_dev->data->dev_conf.intr_conf.lsc)
                vnic_dev_notify_set(enic->vdev, 0);

        if (enic_clsf_init(enic))
                dev_warning(enic, "Init of hash table for clsf failed. "\
                        "Flow director feature will not work\n");

        for (index = 0; index < enic->rq_count; index++) {
                err = enic_alloc_rx_queue_mbufs(enic,
                        &enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
                if (err) {
                        dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
                        return err;
                }
                err = enic_alloc_rx_queue_mbufs(enic,
                        &enic->rq[enic_rte_rq_idx_to_data_idx(index)]);
                if (err) {
                        /* release the allocated mbufs for the sop rq */
                        enic_rxmbuf_queue_release(enic,
                                &enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);

                        dev_err(enic, "Failed to alloc data RX queue mbufs\n");
                        return err;
                }
        }

        for (index = 0; index < enic->wq_count; index++)
                enic_start_wq(enic, index);
        for (index = 0; index < enic->rq_count; index++)
                enic_start_rq(enic, index);

        vnic_dev_add_addr(enic->vdev, enic->mac_addr);

        vnic_dev_enable_wait(enic->vdev);

        /* Register and enable error interrupt */
        rte_intr_callback_register(&(enic->pdev->intr_handle),
                enic_intr_handler, (void *)enic->rte_dev);

        rte_intr_enable(&(enic->pdev->intr_handle));
        vnic_intr_unmask(&enic->intr);

        return 0;
}

int enic_alloc_intr_resources(struct enic *enic)
{
        int err;

        dev_info(enic, "vNIC resources used:  "\
                "wq %d rq %d cq %d intr %d\n",
                enic->wq_count, enic_vnic_rq_count(enic),
                enic->cq_count, enic->intr_count);

        err = vnic_intr_alloc(enic->vdev, &enic->intr, 0);
        if (err)
                enic_free_vnic_resources(enic);

        return err;
}

void enic_free_rq(void *rxq)
{
        struct vnic_rq *rq_sop, *rq_data;
        struct enic *enic;

        if (rxq == NULL)
                return;

        rq_sop = (struct vnic_rq *)rxq;
        enic = vnic_dev_priv(rq_sop->vdev);
        rq_data = &enic->rq[rq_sop->data_queue_idx];

        enic_rxmbuf_queue_release(enic, rq_sop);
        if (rq_data->in_use)
                enic_rxmbuf_queue_release(enic, rq_data);

        rte_free(rq_sop->mbuf_ring);
        if (rq_data->in_use)
                rte_free(rq_data->mbuf_ring);

        rq_sop->mbuf_ring = NULL;
        rq_data->mbuf_ring = NULL;

        vnic_rq_free(rq_sop);
        if (rq_data->in_use)
                vnic_rq_free(rq_data);

        vnic_cq_free(&enic->cq[enic_sop_rq_idx_to_cq_idx(rq_sop->index)]);

        rq_sop->in_use = 0;
        rq_data->in_use = 0;
}

void enic_start_wq(struct enic *enic, uint16_t queue_idx)
{
        struct rte_eth_dev *eth_dev = enic->rte_dev;
        vnic_wq_enable(&enic->wq[queue_idx]);
        eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}

int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
{
        struct rte_eth_dev *eth_dev = enic->rte_dev;
        int ret;

        ret = vnic_wq_disable(&enic->wq[queue_idx]);
        if (ret)
                return ret;

        eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
        return 0;
}

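/*
 * Rx queue start/stop helpers. Note the ordering: the data RQ is enabled
 * before the SOP RQ and disabled after it, with a full barrier in between,
 * presumably so the SOP RQ never runs while its data RQ is down; the
 * hardware requirement itself is not documented here.
 */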
void enic_start_rq(struct enic *enic, uint16_t queue_idx)
{
        struct vnic_rq *rq_sop;
        struct vnic_rq *rq_data;
        rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
        rq_data = &enic->rq[rq_sop->data_queue_idx];
        struct rte_eth_dev *eth_dev = enic->rte_dev;

        if (rq_data->in_use)
                vnic_rq_enable(rq_data);
        rte_mb();
        vnic_rq_enable(rq_sop);
        eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}

int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
{
        int ret1 = 0, ret2 = 0;
        struct rte_eth_dev *eth_dev = enic->rte_dev;
        struct vnic_rq *rq_sop;
        struct vnic_rq *rq_data;
        rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
        rq_data = &enic->rq[rq_sop->data_queue_idx];

        ret2 = vnic_rq_disable(rq_sop);
        rte_mb();
        if (rq_data->in_use)
                ret1 = vnic_rq_disable(rq_data);

        if (ret2)
                return ret2;
        else if (ret1)
                return ret1;

        eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
        return 0;
}

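/*
 * Allocate the VIC resources behind one application-visible Rx queue. Each
 * rte_eth Rx queue maps to a start-of-packet (SOP) RQ and, when the scatter
 * Rx offload is enabled and a packet does not fit in a single mbuf, a
 * second "data" RQ holding the continuation buffers. Both RQs share one
 * completion queue.
 */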
int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
        unsigned int socket_id, struct rte_mempool *mp,
        uint16_t nb_desc, uint16_t free_thresh)
{
        int rc;
        uint16_t sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
        uint16_t data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx);
        struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
        struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
        unsigned int mbuf_size, mbufs_per_pkt;
        unsigned int nb_sop_desc, nb_data_desc;
        uint16_t min_sop, max_sop, min_data, max_data;
        uint16_t mtu = enic->rte_dev->data->mtu;

        rq_sop->is_sop = 1;
        rq_sop->data_queue_idx = data_queue_idx;
        rq_data->is_sop = 0;
        rq_data->data_queue_idx = 0;
        rq_sop->socket_id = socket_id;
        rq_sop->mp = mp;
        rq_data->socket_id = socket_id;
        rq_data->mp = mp;
        rq_sop->in_use = 1;
        rq_sop->rx_free_thresh = free_thresh;
        rq_data->rx_free_thresh = free_thresh;
        dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
                  free_thresh);

        mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
                               RTE_PKTMBUF_HEADROOM);

        if (enic->rte_dev->data->dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_SCATTER) {
                dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
                /* ceil((mtu + ETHER_HDR_LEN + 4)/mbuf_size) */
                mbufs_per_pkt = ((mtu + ETHER_HDR_LEN + 4) +
                                 (mbuf_size - 1)) / mbuf_size;
        } else {
                dev_info(enic, "Scatter rx mode disabled\n");
                mbufs_per_pkt = 1;
        }
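        /*
         * Illustration (example numbers only): with a 9000 byte MTU and
         * 2048 bytes of mbuf data room, the ceiling above gives
         * (9000 + 14 + 4 + 2047) / 2048 = 5 mbufs per packet.
         */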

        if (mbufs_per_pkt > 1) {
                dev_info(enic, "Rq %u Scatter rx mode in use\n", queue_idx);
                rq_sop->data_queue_enable = 1;
                rq_data->in_use = 1;
        } else {
                dev_info(enic, "Rq %u Scatter rx mode not being used\n",
                         queue_idx);
                rq_sop->data_queue_enable = 0;
                rq_data->in_use = 0;
        }

        /* the number of descriptors must be a multiple of 32 */
        nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;
        nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;
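        /*
         * Illustration (example numbers only): nb_desc = 512 with
         * mbufs_per_pkt = 5 gives nb_sop_desc = (512 / 5) & ~0x1F = 96 and
         * nb_data_desc = (512 - 96) & ~0x1F = 416, before the min/max
         * clamping below.
         */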

        rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
        rq_data->max_mbufs_per_pkt = mbufs_per_pkt;

        if (mbufs_per_pkt > 1) {
                min_sop = 64;
                max_sop = ((enic->config.rq_desc_count /
                            (mbufs_per_pkt - 1)) & ~0x1F);
                min_data = min_sop * (mbufs_per_pkt - 1);
                max_data = enic->config.rq_desc_count;
        } else {
                min_sop = 64;
                max_sop = enic->config.rq_desc_count;
                min_data = 0;
                max_data = 0;
        }

        if (nb_desc < (min_sop + min_data)) {
                dev_warning(enic,
                            "Number of rx descs too low, adjusting to minimum\n");
                nb_sop_desc = min_sop;
                nb_data_desc = min_data;
        } else if (nb_desc > (max_sop + max_data)) {
                dev_warning(enic,
                            "Number of rx_descs too high, adjusting to maximum\n");
                nb_sop_desc = max_sop;
                nb_data_desc = max_data;
        }
        if (mbufs_per_pkt > 1) {
                dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n",
                         mtu, mbuf_size, min_sop + min_data,
                         max_sop + max_data);
        }
        dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
                 nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc);

        /* Allocate sop queue resources */
        rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx,
                nb_sop_desc, sizeof(struct rq_enet_desc));
        if (rc) {
                dev_err(enic, "error in allocation of sop rq\n");
                goto err_exit;
        }
        nb_sop_desc = rq_sop->ring.desc_count;

        if (rq_data->in_use) {
                /* Allocate data queue resources */
                rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx,
                                   nb_data_desc,
                                   sizeof(struct rq_enet_desc));
                if (rc) {
                        dev_err(enic, "error in allocation of data rq\n");
                        goto err_free_rq_sop;
                }
                nb_data_desc = rq_data->ring.desc_count;
        }
        rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
                           socket_id, nb_sop_desc + nb_data_desc,
                           sizeof(struct cq_enet_rq_desc));
        if (rc) {
                dev_err(enic, "error in allocation of cq for rq\n");
                goto err_free_rq_data;
        }

        /* Allocate the mbuf rings */
        rq_sop->mbuf_ring = (struct rte_mbuf **)
                rte_zmalloc_socket("rq->mbuf_ring",
                                   sizeof(struct rte_mbuf *) * nb_sop_desc,
                                   RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
        if (rq_sop->mbuf_ring == NULL)
                goto err_free_cq;

        if (rq_data->in_use) {
                rq_data->mbuf_ring = (struct rte_mbuf **)
                        rte_zmalloc_socket("rq->mbuf_ring",
                                sizeof(struct rte_mbuf *) * nb_data_desc,
                                RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
                if (rq_data->mbuf_ring == NULL)
                        goto err_free_sop_mbuf;
        }

        rq_sop->tot_nb_desc = nb_desc; /* squirrel away for MTU update function */

        return 0;

err_free_sop_mbuf:
        rte_free(rq_sop->mbuf_ring);
err_free_cq:
        /* cleanup on error */
        vnic_cq_free(&enic->cq[queue_idx]);
err_free_rq_data:
        if (rq_data->in_use)
                vnic_rq_free(rq_data);
err_free_rq_sop:
        vnic_rq_free(rq_sop);
err_exit:
        return -ENOMEM;
}

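/*
 * Release a Tx queue. The WQ's completion queue sits at index
 * rq_count + wq->index: CQs for all Rx queues come first, followed by one
 * per Tx queue (see enic_set_vnic_res(), which sets cq_count to
 * rq_count + wq_count).
 */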
void enic_free_wq(void *txq)
{
        struct vnic_wq *wq;
        struct enic *enic;

        if (txq == NULL)
                return;

        wq = (struct vnic_wq *)txq;
        enic = vnic_dev_priv(wq->vdev);
        rte_memzone_free(wq->cqmsg_rz);
        vnic_wq_free(wq);
        vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
}

int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
        unsigned int socket_id, uint16_t nb_desc)
{
        int err;
        struct vnic_wq *wq = &enic->wq[queue_idx];
        unsigned int cq_index = enic_cq_wq(enic, queue_idx);
        char name[NAME_MAX];
        static int instance;

        wq->socket_id = socket_id;
        if (nb_desc) {
                if (nb_desc > enic->config.wq_desc_count) {
                        dev_warning(enic,
                                "WQ %d - number of tx desc in cmd line (%d) "\
                                "is greater than that in the UCSM/CIMC adapter "\
                                "policy.  Applying the value in the adapter "\
                                "policy (%d)\n",
                                queue_idx, nb_desc, enic->config.wq_desc_count);
                } else if (nb_desc != enic->config.wq_desc_count) {
                        enic->config.wq_desc_count = nb_desc;
                        dev_info(enic,
                                "TX Queues - effective number of descs:%d\n",
                                nb_desc);
                }
        }

        /* Allocate queue resources */
        err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
                enic->config.wq_desc_count,
                sizeof(struct wq_enet_desc));
        if (err) {
                dev_err(enic, "error in allocation of wq\n");
                return err;
        }

        err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
                socket_id, enic->config.wq_desc_count,
                sizeof(struct cq_enet_wq_desc));
        if (err) {
                vnic_wq_free(wq);
                dev_err(enic, "error in allocation of cq for wq\n");
                return err;
        }

        /* set up the CQ message */
        snprintf((char *)name, sizeof(name),
                 "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx,
                instance++);

        wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
                                                   sizeof(uint32_t),
                                                   SOCKET_ID_ANY, 0,
                                                   ENIC_ALIGN);
        if (!wq->cqmsg_rz)
                return -ENOMEM;

        return err;
}

int enic_disable(struct enic *enic)
{
        unsigned int i;
        int err;

        vnic_intr_mask(&enic->intr);
        (void)vnic_intr_masked(&enic->intr); /* flush write */
        rte_intr_disable(&enic->pdev->intr_handle);
        rte_intr_callback_unregister(&enic->pdev->intr_handle,
                                     enic_intr_handler,
                                     (void *)enic->rte_dev);

        vnic_dev_disable(enic->vdev);

        enic_clsf_destroy(enic);

        if (!enic_is_sriov_vf(enic))
                vnic_dev_del_addr(enic->vdev, enic->mac_addr);

        for (i = 0; i < enic->wq_count; i++) {
                err = vnic_wq_disable(&enic->wq[i]);
                if (err)
                        return err;
        }
        for (i = 0; i < enic_vnic_rq_count(enic); i++) {
                if (enic->rq[i].in_use) {
                        err = vnic_rq_disable(&enic->rq[i]);
                        if (err)
                                return err;
                }
        }

        /* If we were using interrupts, set the interrupt vector to -1
         * to disable interrupts.  We are not disabling link notifications,
         * though, as we want the polling of link status to continue working.
         */
        if (enic->rte_dev->data->dev_conf.intr_conf.lsc)
                vnic_dev_notify_set(enic->vdev, -1);

        vnic_dev_set_reset_flag(enic->vdev, 1);

        for (i = 0; i < enic->wq_count; i++)
                vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);

        for (i = 0; i < enic_vnic_rq_count(enic); i++)
                if (enic->rq[i].in_use)
                        vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
        for (i = 0; i < enic->cq_count; i++)
                vnic_cq_clean(&enic->cq[i]);
        vnic_intr_clean(&enic->intr);

        return 0;
}

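/*
 * Kick off an operation through 'start' and poll 'finished' roughly once a
 * millisecond until it reports completion, giving up after about two
 * seconds.
 */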
static int enic_dev_wait(struct vnic_dev *vdev,
        int (*start)(struct vnic_dev *, int),
        int (*finished)(struct vnic_dev *, int *),
        int arg)
{
        int done;
        int err;
        int i;

        err = start(vdev, arg);
        if (err)
                return err;

        /* Wait for func to complete...2 seconds max */
        for (i = 0; i < 2000; i++) {
                err = finished(vdev, &done);
                if (err)
                        return err;
                if (done)
                        return 0;
                usleep(1000);
        }
        return -ETIMEDOUT;
}

static int enic_dev_open(struct enic *enic)
{
        int err;

        err = enic_dev_wait(enic->vdev, vnic_dev_open,
                vnic_dev_open_done, 0);
        if (err)
                dev_err(enic_get_dev(enic),
                        "vNIC device open failed, err %d\n", err);

        return err;
}

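/*
 * RSS setup: the fixed 40-byte hash key and the CPU indirection table
 * (128 entries with the 7 hash bits used in enic_set_rss_nic_cfg()) are
 * handed to the adapter through DMA-coherent buffers obtained from
 * enic_alloc_consistent(). The indirection table simply round-robins over
 * the configured SOP RQs.
 */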
static int enic_set_rsskey(struct enic *enic)
{
        dma_addr_t rss_key_buf_pa;
        union vnic_rss_key *rss_key_buf_va = NULL;
        static union vnic_rss_key rss_key = {
                .key = {
                        [0] = {.b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}},
                        [1] = {.b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101}},
                        [2] = {.b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115}},
                        [3] = {.b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108}},
                }
        };
        int err;
        u8 name[NAME_MAX];

        snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name);
        rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
                &rss_key_buf_pa, name);
        if (!rss_key_buf_va)
                return -ENOMEM;

        rte_memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));

        err = enic_set_rss_key(enic,
                rss_key_buf_pa,
                sizeof(union vnic_rss_key));

        enic_free_consistent(enic, sizeof(union vnic_rss_key),
                rss_key_buf_va, rss_key_buf_pa);

        return err;
}

static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
        dma_addr_t rss_cpu_buf_pa;
        union vnic_rss_cpu *rss_cpu_buf_va = NULL;
        int i;
        int err;
        u8 name[NAME_MAX];

        snprintf((char *)name, NAME_MAX, "rss_cpu-%s", enic->bdf_name);
        rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),
                &rss_cpu_buf_pa, name);
        if (!rss_cpu_buf_va)
                return -ENOMEM;

        for (i = 0; i < (1 << rss_hash_bits); i++)
                (*rss_cpu_buf_va).cpu[i / 4].b[i % 4] =
                        enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);

        err = enic_set_rss_cpu(enic,
                rss_cpu_buf_pa,
                sizeof(union vnic_rss_cpu));

        enic_free_consistent(enic, sizeof(union vnic_rss_cpu),
                rss_cpu_buf_va, rss_cpu_buf_pa);

        return err;
}

static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
        u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
        const u8 tso_ipid_split_en = 0;
        int err;

        /* Enable VLAN tag stripping */

        err = enic_set_nic_cfg(enic,
                rss_default_cpu, rss_hash_type,
                rss_hash_bits, rss_base_cpu,
                rss_enable, tso_ipid_split_en,
                enic->ig_vlan_strip_en);

        return err;
}

int enic_set_rss_nic_cfg(struct enic *enic)
{
        const u8 rss_default_cpu = 0;
        const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
            NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
            NIC_CFG_RSS_HASH_TYPE_IPV6 |
            NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
        const u8 rss_hash_bits = 7;
        const u8 rss_base_cpu = 0;
        u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);

        if (rss_enable) {
                if (!enic_set_rsskey(enic)) {
                        if (enic_set_rsscpu(enic, rss_hash_bits)) {
                                rss_enable = 0;
                                dev_warning(enic, "RSS disabled, "\
                                        "Failed to set RSS cpu indirection table.");
                        }
                } else {
                        rss_enable = 0;
                        dev_warning(enic,
                                "RSS disabled, Failed to set RSS key.\n");
                }
        }

        return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
                rss_hash_bits, rss_base_cpu, rss_enable);
}

int enic_setup_finish(struct enic *enic)
{
        int ret;

        enic_init_soft_stats(enic);

        ret = enic_set_rss_nic_cfg(enic);
        if (ret) {
                dev_err(enic, "Failed to config nic, aborting.\n");
                return -1;
        }

        /* Default conf */
        vnic_dev_packet_filter(enic->vdev,
                1 /* directed  */,
                1 /* multicast */,
                1 /* broadcast */,
                0 /* promisc   */,
                1 /* allmulti  */);

        enic->promisc = 0;
        enic->allmulti = 1;

        return 0;
}

void enic_add_packet_filter(struct enic *enic)
{
        /* Args -> directed, multicast, broadcast, promisc, allmulti */
        vnic_dev_packet_filter(enic->vdev, 1, 1, 1,
                enic->promisc, enic->allmulti);
}

int enic_get_link_status(struct enic *enic)
{
        return vnic_dev_link_status(enic->vdev);
}

static void enic_dev_deinit(struct enic *enic)
{
        struct rte_eth_dev *eth_dev = enic->rte_dev;

        /* stop link status checking */
        vnic_dev_notify_unset(enic->vdev);

        rte_free(eth_dev->data->mac_addrs);
}


int enic_set_vnic_res(struct enic *enic)
{
        struct rte_eth_dev *eth_dev = enic->rte_dev;
        int rc = 0;

        /* With Rx scatter support, two RQs are now used per RQ used by
         * the application.
         */
        if (enic->conf_rq_count < eth_dev->data->nb_rx_queues) {
                dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
                        eth_dev->data->nb_rx_queues,
                        eth_dev->data->nb_rx_queues * 2, enic->conf_rq_count);
                rc = -EINVAL;
        }
        if (enic->conf_wq_count < eth_dev->data->nb_tx_queues) {
                dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
                        eth_dev->data->nb_tx_queues, enic->conf_wq_count);
                rc = -EINVAL;
        }

        if (enic->conf_cq_count < (eth_dev->data->nb_rx_queues +
                                   eth_dev->data->nb_tx_queues)) {
                dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
                        (eth_dev->data->nb_rx_queues +
                         eth_dev->data->nb_tx_queues), enic->conf_cq_count);
                rc = -EINVAL;
        }

        if (rc == 0) {
                enic->rq_count = eth_dev->data->nb_rx_queues;
                enic->wq_count = eth_dev->data->nb_tx_queues;
                enic->cq_count = enic->rq_count + enic->wq_count;
        }

        return rc;
}

/*
 * Reinitialize an Rx queue pair: clean and re-init its completion queue,
 * restart the SOP and data RQs from index 0, and refill their mbuf rings.
 */
static int
enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
{
        struct vnic_rq *sop_rq, *data_rq;
        unsigned int cq_idx;
        int rc = 0;

        sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
        data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];
        cq_idx = rq_idx;

        vnic_cq_clean(&enic->cq[cq_idx]);
        vnic_cq_init(&enic->cq[cq_idx],
                     0 /* flow_control_enable */,
                     1 /* color_enable */,
                     0 /* cq_head */,
                     0 /* cq_tail */,
                     1 /* cq_tail_color */,
                     0 /* interrupt_enable */,
                     1 /* cq_entry_enable */,
                     0 /* cq_message_enable */,
                     0 /* interrupt offset */,
                     0 /* cq_message_addr */);

        vnic_rq_init_start(sop_rq, enic_cq_rq(enic,
                           enic_rte_rq_idx_to_sop_idx(rq_idx)), 0,
                           sop_rq->ring.desc_count - 1, 1, 0);
        if (data_rq->in_use) {
                vnic_rq_init_start(data_rq,
                                   enic_cq_rq(enic,
                                   enic_rte_rq_idx_to_data_idx(rq_idx)), 0,
                                   data_rq->ring.desc_count - 1, 1, 0);
        }

        rc = enic_alloc_rx_queue_mbufs(enic, sop_rq);
        if (rc)
                return rc;

        if (data_rq->in_use) {
                rc = enic_alloc_rx_queue_mbufs(enic, data_rq);
                if (rc) {
                        enic_rxmbuf_queue_release(enic, sop_rq);
                        return rc;
                }
        }

        return 0;
}

/* The Cisco NIC can send and receive packets up to a max packet size
 * determined by the NIC type and firmware. There is also an MTU
 * configured into the NIC via the CIMC/UCSM management interface
 * which can be overridden by this function (up to the max packet size).
 * Depending on the network setup, doing so may cause packet drops
 * and unexpected behavior.
 */
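/*
 * When scatter Rx is enabled the update is more involved: all RQs are
 * stopped, the Rx burst function is swapped for a no-op while in-flight
 * polling threads drain, the RQs are freed and reallocated against the new
 * MTU (which may change the number of mbufs needed per packet), and Rx
 * traffic is then restarted.
 */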
int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
{
        unsigned int rq_idx;
        struct vnic_rq *rq;
        int rc = 0;
        uint16_t old_mtu;       /* previous setting */
        uint16_t config_mtu;    /* Value configured into NIC via CIMC/UCSM */
        struct rte_eth_dev *eth_dev = enic->rte_dev;

        old_mtu = eth_dev->data->mtu;
        config_mtu = enic->config.mtu;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;

        if (new_mtu > enic->max_mtu) {
                dev_err(enic,
                        "MTU not updated: requested (%u) greater than max (%u)\n",
                        new_mtu, enic->max_mtu);
                return -EINVAL;
        }
        if (new_mtu < ENIC_MIN_MTU) {
                dev_info(enic,
                        "MTU not updated: requested (%u) less than min (%u)\n",
                        new_mtu, ENIC_MIN_MTU);
                return -EINVAL;
        }
        if (new_mtu > config_mtu)
                dev_warning(enic,
                        "MTU (%u) is greater than value configured in NIC (%u)\n",
                        new_mtu, config_mtu);

        /* The easy case is when scatter is disabled. However if the MTU
         * becomes greater than the mbuf data size, packet drops will ensue.
         */
        if (!(enic->rte_dev->data->dev_conf.rxmode.offloads &
              DEV_RX_OFFLOAD_SCATTER)) {
                eth_dev->data->mtu = new_mtu;
                goto set_mtu_done;
        }

        /* Rx scatter is enabled so reconfigure RQ's on the fly. The point is to
         * change Rx scatter mode if necessary for better performance. I.e. if
         * MTU was greater than the mbuf size and now it's less, scatter Rx
         * doesn't have to be used and vice versa.
         */
        rte_spinlock_lock(&enic->mtu_lock);

        /* Stop traffic on all RQs */
        for (rq_idx = 0; rq_idx < enic->rq_count * 2; rq_idx++) {
                rq = &enic->rq[rq_idx];
                if (rq->is_sop && rq->in_use) {
                        rc = enic_stop_rq(enic,
                                          enic_sop_rq_idx_to_rte_idx(rq_idx));
                        if (rc) {
                                dev_err(enic, "Failed to stop Rq %u\n", rq_idx);
                                goto set_mtu_done;
                        }
                }
        }

        /* replace Rx function with a no-op to avoid getting stale pkts */
        eth_dev->rx_pkt_burst = enic_dummy_recv_pkts;
        rte_mb();

        /* Allow time for threads to exit the real Rx function. */
        usleep(100000);

        /* now it is safe to reconfigure the RQs */

        /* update the mtu */
        eth_dev->data->mtu = new_mtu;

        /* free and reallocate RQs with the new MTU */
        for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
                rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];

                enic_free_rq(rq);
                rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
                                   rq->tot_nb_desc, rq->rx_free_thresh);
                if (rc) {
                        dev_err(enic,
                                "Fatal MTU alloc error- No traffic will pass\n");
                        goto set_mtu_done;
                }

                rc = enic_reinit_rq(enic, rq_idx);
                if (rc) {
                        dev_err(enic,
                                "Fatal MTU RQ reinit- No traffic will pass\n");
                        goto set_mtu_done;
                }
        }

        /* put back the real receive function */
        rte_mb();
        eth_dev->rx_pkt_burst = enic_recv_pkts;
        rte_mb();

        /* restart Rx traffic */
        for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
                rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
                if (rq->is_sop && rq->in_use)
                        enic_start_rq(enic, rq_idx);
        }

set_mtu_done:
        dev_info(enic, "MTU changed from %u to %u\n", old_mtu, new_mtu);
        rte_spinlock_unlock(&enic->mtu_lock);
        return rc;
}

static int enic_dev_init(struct enic *enic)
{
        int err;
        struct rte_eth_dev *eth_dev = enic->rte_dev;

        vnic_dev_intr_coal_timer_info_default(enic->vdev);

        /* Get vNIC configuration */
        err = enic_get_vnic_config(enic);
        if (err) {
                dev_err(dev, "Get vNIC configuration failed, aborting\n");
                return err;
        }

        /* Get available resource counts */
        enic_get_res_counts(enic);
        if (enic->conf_rq_count == 1) {
                dev_err(enic, "Running with only 1 RQ configured in the vNIC is not supported.\n");
                dev_err(enic, "Please configure 2 RQs in the vNIC for each Rx queue used by DPDK.\n");
                dev_err(enic, "See the ENIC PMD guide for more information.\n");
                return -EINVAL;
        }

        /* Get the supported filters */
        enic_fdir_info(enic);

        eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN
                                                * ENIC_MAX_MAC_ADDR, 0);
        if (!eth_dev->data->mac_addrs) {
                dev_err(enic, "mac addr storage alloc failed, aborting.\n");
                return -1;
        }
        ether_addr_copy((struct ether_addr *) enic->mac_addr,
                        eth_dev->data->mac_addrs);

        vnic_dev_set_reset_flag(enic->vdev, 0);

        LIST_INIT(&enic->flows);
        rte_spinlock_init(&enic->flows_lock);

        /* set up link status checking */
        vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */

        return 0;

}

int enic_probe(struct enic *enic)
{
        struct rte_pci_device *pdev = enic->pdev;
        int err = -1;

        dev_debug(enic, "Initializing ENIC PMD\n");

        /* if this is a secondary process the hardware is already initialized */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;
        enic->bar0.len = pdev->mem_resource[0].len;

        /* Register vNIC device */
        enic->vdev = vnic_dev_register(NULL, enic, enic->pdev, &enic->bar0, 1);
        if (!enic->vdev) {
                dev_err(enic, "vNIC registration failed, aborting\n");
                goto err_out;
        }

        LIST_INIT(&enic->memzone_list);
        rte_spinlock_init(&enic->memzone_list_lock);

        vnic_register_cbacks(enic->vdev,
                enic_alloc_consistent,
                enic_free_consistent);

        /* Issue device open to get device in known state */
        err = enic_dev_open(enic);
        if (err) {
                dev_err(enic, "vNIC dev open failed, aborting\n");
                goto err_out_unregister;
        }

        /* Set ingress vlan rewrite mode before vnic initialization */
        err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
                IG_VLAN_REWRITE_MODE_PASS_THRU);
        if (err) {
                dev_err(enic,
                        "Failed to set ingress vlan rewrite mode, aborting.\n");
                goto err_out_dev_close;
        }

        /* Issue device init to initialize the vnic-to-switch link.
         * We'll start with carrier off and wait for link UP
         * notification later to turn on carrier.  We don't need
         * to wait here for the vnic-to-switch link initialization
         * to complete; link UP notification is the indication that
         * the process is complete.
         */

        err = vnic_dev_init(enic->vdev, 0);
        if (err) {
                dev_err(enic, "vNIC dev init failed, aborting\n");
                goto err_out_dev_close;
        }

        err = enic_dev_init(enic);
        if (err) {
                dev_err(enic, "Device initialization failed, aborting\n");
                goto err_out_dev_close;
        }

        return 0;

err_out_dev_close:
        vnic_dev_close(enic->vdev);
err_out_unregister:
        vnic_dev_unregister(enic->vdev);
err_out:
        return err;
}

void enic_remove(struct enic *enic)
{
        enic_dev_deinit(enic);
        vnic_dev_close(enic->vdev);
        vnic_dev_unregister(enic->vdev);
}