cdecf6b512ef0d4cc0df5f816c3daa4cd907fe3a
[dpdk.git] / drivers / net / cxgbe / cxgbe_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2018 Chelsio Communications.
3  * All rights reserved.
4  */
5
6 #include <sys/queue.h>
7 #include <stdio.h>
8 #include <errno.h>
9 #include <stdint.h>
10 #include <string.h>
11 #include <unistd.h>
12 #include <stdarg.h>
13 #include <inttypes.h>
14 #include <netinet/in.h>
15
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
18 #include <rte_cycles.h>
19 #include <rte_interrupts.h>
20 #include <rte_log.h>
21 #include <rte_debug.h>
22 #include <rte_pci.h>
23 #include <rte_bus_pci.h>
24 #include <rte_branch_prediction.h>
25 #include <rte_memory.h>
26 #include <rte_tailq.h>
27 #include <rte_eal.h>
28 #include <rte_alarm.h>
29 #include <rte_ether.h>
30 #include <ethdev_driver.h>
31 #include <ethdev_pci.h>
32 #include <rte_malloc.h>
33 #include <rte_random.h>
34 #include <rte_dev.h>
35
36 #include "cxgbe.h"
37 #include "cxgbe_pfvf.h"
38 #include "cxgbe_flow.h"
39
/*
 * Macros needed to support the PCI Device ID Table ...
 *
 * The shared header base/t4_pci_id_tbl.h expands these macros to emit the
 * rte_pci_id table (cxgb4_pci_tbl[]) used for device probing.
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
        static const struct rte_pci_id cxgb4_pci_tbl[] = {
/* Select the PF4 personality entries from the shared ID table */
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

#define PCI_VENDOR_ID_CHELSIO 0x1425

#define CH_PCI_ID_TABLE_ENTRY(devid) \
                { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }

/* Zero vendor_id entry terminates the table for rte_pci_id scanners */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
                { .vendor_id = 0, } \
        }

/*
 *... and the PCI ID Table itself ...
 */
#include "base/t4_pci_id_tbl.h"
60
61 uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
62                          uint16_t nb_pkts)
63 {
64         struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
65         uint16_t pkts_sent, pkts_remain;
66         uint16_t total_sent = 0;
67         uint16_t idx = 0;
68         int ret = 0;
69
70         t4_os_lock(&txq->txq_lock);
71         /* free up desc from already completed tx */
72         reclaim_completed_tx(&txq->q);
73         if (unlikely(!nb_pkts))
74                 goto out_unlock;
75
76         rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[0], volatile void *));
77         while (total_sent < nb_pkts) {
78                 pkts_remain = nb_pkts - total_sent;
79
80                 for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
81                         idx = total_sent + pkts_sent;
82                         if ((idx + 1) < nb_pkts)
83                                 rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[idx + 1],
84                                                         volatile void *));
85                         ret = t4_eth_xmit(txq, tx_pkts[idx], nb_pkts);
86                         if (ret < 0)
87                                 break;
88                 }
89                 if (!pkts_sent)
90                         break;
91                 total_sent += pkts_sent;
92                 /* reclaim as much as possible */
93                 reclaim_completed_tx(&txq->q);
94         }
95
96 out_unlock:
97         t4_os_unlock(&txq->txq_lock);
98         return total_sent;
99 }
100
/*
 * Receive up to nb_pkts packets from an Rx queue via cxgbe_poll().
 * Returns the number of packets stored in rx_pkts (work_done).
 */
uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                         uint16_t nb_pkts)
{
        struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
        unsigned int work_done;

        /* NOTE(review): "adapter" is not declared in this scope; this
         * presumably compiles only because dev_err() discards its first
         * argument — confirm against cxgbe_compat.h.
         */
        if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
                dev_err(adapter, "error in cxgbe poll\n");

        return work_done;
}
112
113 int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
114                         struct rte_eth_dev_info *device_info)
115 {
116         struct port_info *pi = eth_dev->data->dev_private;
117         struct adapter *adapter = pi->adapter;
118
119         static const struct rte_eth_desc_lim cxgbe_desc_lim = {
120                 .nb_max = CXGBE_MAX_RING_DESC_SIZE,
121                 .nb_min = CXGBE_MIN_RING_DESC_SIZE,
122                 .nb_align = 1,
123         };
124
125         device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
126         device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
127         device_info->max_rx_queues = adapter->sge.max_ethqsets;
128         device_info->max_tx_queues = adapter->sge.max_ethqsets;
129         device_info->max_mac_addrs = 1;
130         /* XXX: For now we support one MAC/port */
131         device_info->max_vfs = adapter->params.arch.vfcount;
132         device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */
133
134         device_info->rx_queue_offload_capa = 0UL;
135         device_info->rx_offload_capa = CXGBE_RX_OFFLOADS;
136
137         device_info->tx_queue_offload_capa = 0UL;
138         device_info->tx_offload_capa = CXGBE_TX_OFFLOADS;
139
140         device_info->reta_size = pi->rss_size;
141         device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
142         device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL;
143
144         device_info->rx_desc_lim = cxgbe_desc_lim;
145         device_info->tx_desc_lim = cxgbe_desc_lim;
146         cxgbe_get_speed_caps(pi, &device_info->speed_capa);
147
148         return 0;
149 }
150
151 int cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
152 {
153         struct port_info *pi = eth_dev->data->dev_private;
154         struct adapter *adapter = pi->adapter;
155         int ret;
156
157         if (adapter->params.rawf_size != 0) {
158                 ret = cxgbe_mpstcam_rawf_enable(pi);
159                 if (ret < 0)
160                         return ret;
161         }
162
163         return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
164                              1, -1, 1, -1, false);
165 }
166
167 int cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
168 {
169         struct port_info *pi = eth_dev->data->dev_private;
170         struct adapter *adapter = pi->adapter;
171         int ret;
172
173         if (adapter->params.rawf_size != 0) {
174                 ret = cxgbe_mpstcam_rawf_disable(pi);
175                 if (ret < 0)
176                         return ret;
177         }
178
179         return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
180                              0, -1, 1, -1, false);
181 }
182
183 int cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
184 {
185         struct port_info *pi = eth_dev->data->dev_private;
186         struct adapter *adapter = pi->adapter;
187
188         /* TODO: address filters ?? */
189
190         return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
191                              -1, 1, 1, -1, false);
192 }
193
194 int cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
195 {
196         struct port_info *pi = eth_dev->data->dev_private;
197         struct adapter *adapter = pi->adapter;
198
199         /* TODO: address filters ?? */
200
201         return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
202                              -1, 0, 1, -1, false);
203 }
204
/*
 * Poll firmware events and report the current link state to ethdev.
 *
 * Link changes arrive on the firmware event queue, so each poll iteration
 * drains up to "budget" events and re-checks link_ok. When
 * wait_to_complete is set, retries up to CXGBE_LINK_STATUS_POLL_CNT times
 * with CXGBE_LINK_STATUS_POLL_MS between attempts.
 */
int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
                          int wait_to_complete)
{
        struct port_info *pi = eth_dev->data->dev_private;
        unsigned int i, work_done, budget = 32;
        struct link_config *lc = &pi->link_cfg;
        struct adapter *adapter = pi->adapter;
        struct rte_eth_link new_link = { 0 };
        u8 old_link = pi->link_cfg.link_ok;
        struct sge *s = &adapter->sge;

        for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
                /* Can't poll before the firmware event queue exists */
                if (!s->fw_evtq.desc)
                        break;

                /* Drain pending firmware events; may update link_ok */
                cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

                /* Exit if link status changed or always forced up */
                if (pi->link_cfg.link_ok != old_link ||
                    cxgbe_force_linkup(adapter))
                        break;

                if (!wait_to_complete)
                        break;

                rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
        }

        /* Report the (possibly forced-up) state back to the ethdev layer */
        new_link.link_status = cxgbe_force_linkup(adapter) ?
                               ETH_LINK_UP : pi->link_cfg.link_ok;
        new_link.link_autoneg = (lc->link_caps & FW_PORT_CAP32_ANEG) ? 1 : 0;
        new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        new_link.link_speed = t4_fwcap_to_speed(lc->link_caps);

        return rte_eth_linkstatus_set(eth_dev, &new_link);
}
241
242 /**
243  * Set device link up.
244  */
245 int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
246 {
247         struct port_info *pi = dev->data->dev_private;
248         struct adapter *adapter = pi->adapter;
249         unsigned int work_done, budget = 32;
250         struct sge *s = &adapter->sge;
251         int ret;
252
253         if (!s->fw_evtq.desc)
254                 return -ENOMEM;
255
256         /* Flush all link events */
257         cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
258
259         /* If link already up, nothing to do */
260         if (pi->link_cfg.link_ok)
261                 return 0;
262
263         ret = cxgbe_set_link_status(pi, true);
264         if (ret)
265                 return ret;
266
267         cxgbe_dev_link_update(dev, 1);
268         return 0;
269 }
270
271 /**
272  * Set device link down.
273  */
274 int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
275 {
276         struct port_info *pi = dev->data->dev_private;
277         struct adapter *adapter = pi->adapter;
278         unsigned int work_done, budget = 32;
279         struct sge *s = &adapter->sge;
280         int ret;
281
282         if (!s->fw_evtq.desc)
283                 return -ENOMEM;
284
285         /* Flush all link events */
286         cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
287
288         /* If link already down, nothing to do */
289         if (!pi->link_cfg.link_ok)
290                 return 0;
291
292         ret = cxgbe_set_link_status(pi, false);
293         if (ret)
294                 return ret;
295
296         cxgbe_dev_link_update(dev, 0);
297         return 0;
298 }
299
300 int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
301 {
302         struct port_info *pi = eth_dev->data->dev_private;
303         struct adapter *adapter = pi->adapter;
304         struct rte_eth_dev_info dev_info;
305         int err;
306         uint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
307
308         err = cxgbe_dev_info_get(eth_dev, &dev_info);
309         if (err != 0)
310                 return err;
311
312         /* Must accommodate at least RTE_ETHER_MIN_MTU */
313         if (mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
314                 return -EINVAL;
315
316         err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
317                             -1, -1, true);
318         return err;
319 }
320
321 /*
322  * Stop device.
323  */
324 int cxgbe_dev_close(struct rte_eth_dev *eth_dev)
325 {
326         struct port_info *temp_pi, *pi = eth_dev->data->dev_private;
327         struct adapter *adapter = pi->adapter;
328         u8 i;
329
330         CXGBE_FUNC_TRACE();
331
332         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
333                 return 0;
334
335         if (!(adapter->flags & FULL_INIT_DONE))
336                 return 0;
337
338         if (!pi->viid)
339                 return 0;
340
341         cxgbe_down(pi);
342         t4_sge_eth_release_queues(pi);
343         t4_free_vi(adapter, adapter->mbox, adapter->pf, 0, pi->viid);
344         pi->viid = 0;
345
346         /* Free up the adapter-wide resources only after all the ports
347          * under this PF have been closed.
348          */
349         for_each_port(adapter, i) {
350                 temp_pi = adap2pinfo(adapter, i);
351                 if (temp_pi->viid)
352                         return 0;
353         }
354
355         cxgbe_close(adapter);
356         rte_free(adapter);
357
358         return 0;
359 }
360
361 /* Start the device.
362  * It returns 0 on success.
363  */
364 int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
365 {
366         struct port_info *pi = eth_dev->data->dev_private;
367         struct rte_eth_rxmode *rx_conf = &eth_dev->data->dev_conf.rxmode;
368         struct adapter *adapter = pi->adapter;
369         int err = 0, i;
370
371         CXGBE_FUNC_TRACE();
372
373         /*
374          * If we don't have a connection to the firmware there's nothing we
375          * can do.
376          */
377         if (!(adapter->flags & FW_OK)) {
378                 err = -ENXIO;
379                 goto out;
380         }
381
382         if (!(adapter->flags & FULL_INIT_DONE)) {
383                 err = cxgbe_up(adapter);
384                 if (err < 0)
385                         goto out;
386         }
387
388         if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
389                 eth_dev->data->scattered_rx = 1;
390         else
391                 eth_dev->data->scattered_rx = 0;
392
393         cxgbe_enable_rx_queues(pi);
394
395         err = cxgbe_setup_rss(pi);
396         if (err)
397                 goto out;
398
399         for (i = 0; i < pi->n_tx_qsets; i++) {
400                 err = cxgbe_dev_tx_queue_start(eth_dev, i);
401                 if (err)
402                         goto out;
403         }
404
405         for (i = 0; i < pi->n_rx_qsets; i++) {
406                 err = cxgbe_dev_rx_queue_start(eth_dev, i);
407                 if (err)
408                         goto out;
409         }
410
411         err = cxgbe_link_start(pi);
412         if (err)
413                 goto out;
414
415 out:
416         return err;
417 }
418
419 /*
420  * Stop device: disable rx and tx functions to allow for reconfiguring.
421  */
422 int cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
423 {
424         struct port_info *pi = eth_dev->data->dev_private;
425         struct adapter *adapter = pi->adapter;
426
427         CXGBE_FUNC_TRACE();
428
429         if (!(adapter->flags & FULL_INIT_DONE))
430                 return 0;
431
432         cxgbe_down(pi);
433
434         /*
435          *  We clear queues only if both tx and rx path of the port
436          *  have been disabled
437          */
438         t4_sge_eth_clear_queues(pi);
439         eth_dev->data->scattered_rx = 0;
440
441         return 0;
442 }
443
444 int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
445 {
446         struct port_info *pi = eth_dev->data->dev_private;
447         struct adapter *adapter = pi->adapter;
448         int err;
449
450         CXGBE_FUNC_TRACE();
451
452         if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
453                 eth_dev->data->dev_conf.rxmode.offloads |=
454                         DEV_RX_OFFLOAD_RSS_HASH;
455
456         if (!(adapter->flags & FW_QUEUE_BOUND)) {
457                 err = cxgbe_setup_sge_fwevtq(adapter);
458                 if (err)
459                         return err;
460                 adapter->flags |= FW_QUEUE_BOUND;
461                 if (is_pf4(adapter)) {
462                         err = cxgbe_setup_sge_ctrl_txq(adapter);
463                         if (err)
464                                 return err;
465                 }
466         }
467
468         err = cxgbe_cfg_queue_count(eth_dev);
469         if (err)
470                 return err;
471
472         return 0;
473 }
474
475 int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
476 {
477         int ret;
478         struct sge_eth_txq *txq = (struct sge_eth_txq *)
479                                   (eth_dev->data->tx_queues[tx_queue_id]);
480
481         dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);
482
483         ret = t4_sge_eth_txq_start(txq);
484         if (ret == 0)
485                 eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
486
487         return ret;
488 }
489
490 int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
491 {
492         int ret;
493         struct sge_eth_txq *txq = (struct sge_eth_txq *)
494                                   (eth_dev->data->tx_queues[tx_queue_id]);
495
496         dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);
497
498         ret = t4_sge_eth_txq_stop(txq);
499         if (ret == 0)
500                 eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
501
502         return ret;
503 }
504
505 int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
506                              uint16_t queue_idx, uint16_t nb_desc,
507                              unsigned int socket_id,
508                              const struct rte_eth_txconf *tx_conf __rte_unused)
509 {
510         struct port_info *pi = eth_dev->data->dev_private;
511         struct adapter *adapter = pi->adapter;
512         struct sge *s = &adapter->sge;
513         unsigned int temp_nb_desc;
514         struct sge_eth_txq *txq;
515         int err = 0;
516
517         txq = &s->ethtxq[pi->first_txqset + queue_idx];
518         dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
519                   __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
520                   socket_id, pi->first_txqset);
521
522         /*  Free up the existing queue  */
523         if (eth_dev->data->tx_queues[queue_idx]) {
524                 cxgbe_dev_tx_queue_release(eth_dev, queue_idx);
525                 eth_dev->data->tx_queues[queue_idx] = NULL;
526         }
527
528         eth_dev->data->tx_queues[queue_idx] = (void *)txq;
529
530         /* Sanity Checking
531          *
532          * nb_desc should be > 1023 and <= CXGBE_MAX_RING_DESC_SIZE
533          */
534         temp_nb_desc = nb_desc;
535         if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
536                 dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
537                          __func__, CXGBE_MIN_RING_DESC_SIZE,
538                          CXGBE_DEFAULT_TX_DESC_SIZE);
539                 temp_nb_desc = CXGBE_DEFAULT_TX_DESC_SIZE;
540         } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
541                 dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
542                         __func__, CXGBE_MIN_RING_DESC_SIZE,
543                         CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_TX_DESC_SIZE);
544                 return -(EINVAL);
545         }
546
547         txq->q.size = temp_nb_desc;
548
549         err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
550                                    s->fw_evtq.cntxt_id, socket_id);
551
552         dev_debug(adapter, "%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\n",
553                   __func__, txq->q.cntxt_id, txq->q.abs_id, err);
554         return err;
555 }
556
557 void cxgbe_dev_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
558 {
559         struct sge_eth_txq *txq = eth_dev->data->tx_queues[qid];
560
561         if (txq) {
562                 struct port_info *pi = (struct port_info *)
563                                        (txq->eth_dev->data->dev_private);
564                 struct adapter *adap = pi->adapter;
565
566                 dev_debug(adapter, "%s: pi->port_id = %d; tx_queue_id = %d\n",
567                           __func__, pi->port_id, txq->q.cntxt_id);
568
569                 t4_sge_eth_txq_release(adap, txq);
570         }
571 }
572
573 int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
574 {
575         struct port_info *pi = eth_dev->data->dev_private;
576         struct adapter *adap = pi->adapter;
577         struct sge_eth_rxq *rxq;
578         int ret;
579
580         dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
581                   __func__, pi->port_id, rx_queue_id);
582
583         rxq = eth_dev->data->rx_queues[rx_queue_id];
584         ret = t4_sge_eth_rxq_start(adap, rxq);
585         if (ret == 0)
586                 eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
587
588         return ret;
589 }
590
591 int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
592 {
593         struct port_info *pi = eth_dev->data->dev_private;
594         struct adapter *adap = pi->adapter;
595         struct sge_eth_rxq *rxq;
596         int ret;
597
598         dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
599                   __func__, pi->port_id, rx_queue_id);
600
601         rxq = eth_dev->data->rx_queues[rx_queue_id];
602         ret = t4_sge_eth_rxq_stop(adap, rxq);
603         if (ret == 0)
604                 eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
605
606         return ret;
607 }
608
609 int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
610                              uint16_t queue_idx, uint16_t nb_desc,
611                              unsigned int socket_id,
612                              const struct rte_eth_rxconf *rx_conf __rte_unused,
613                              struct rte_mempool *mp)
614 {
615         unsigned int pkt_len = eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
616                 RTE_ETHER_CRC_LEN;
617         struct port_info *pi = eth_dev->data->dev_private;
618         struct adapter *adapter = pi->adapter;
619         struct rte_eth_dev_info dev_info;
620         struct sge *s = &adapter->sge;
621         unsigned int temp_nb_desc;
622         int err = 0, msi_idx = 0;
623         struct sge_eth_rxq *rxq;
624
625         rxq = &s->ethrxq[pi->first_rxqset + queue_idx];
626         dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
627                   __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
628                   socket_id, mp);
629
630         err = cxgbe_dev_info_get(eth_dev, &dev_info);
631         if (err != 0) {
632                 dev_err(adap, "%s: error during getting ethernet device info",
633                         __func__);
634                 return err;
635         }
636
637         /* Must accommodate at least RTE_ETHER_MIN_MTU */
638         if ((pkt_len < dev_info.min_rx_bufsize) ||
639             (pkt_len > dev_info.max_rx_pktlen)) {
640                 dev_err(adap, "%s: max pkt len must be > %d and <= %d\n",
641                         __func__, dev_info.min_rx_bufsize,
642                         dev_info.max_rx_pktlen);
643                 return -EINVAL;
644         }
645
646         /*  Free up the existing queue  */
647         if (eth_dev->data->rx_queues[queue_idx]) {
648                 cxgbe_dev_rx_queue_release(eth_dev, queue_idx);
649                 eth_dev->data->rx_queues[queue_idx] = NULL;
650         }
651
652         eth_dev->data->rx_queues[queue_idx] = (void *)rxq;
653
654         /* Sanity Checking
655          *
656          * nb_desc should be > 0 and <= CXGBE_MAX_RING_DESC_SIZE
657          */
658         temp_nb_desc = nb_desc;
659         if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
660                 dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
661                          __func__, CXGBE_MIN_RING_DESC_SIZE,
662                          CXGBE_DEFAULT_RX_DESC_SIZE);
663                 temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
664         } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
665                 dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
666                         __func__, CXGBE_MIN_RING_DESC_SIZE,
667                         CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE);
668                 return -(EINVAL);
669         }
670
671         rxq->rspq.size = temp_nb_desc;
672         rxq->fl.size = temp_nb_desc;
673
674         /* Set to jumbo mode if necessary */
675         if (eth_dev->data->mtu > RTE_ETHER_MTU)
676                 eth_dev->data->dev_conf.rxmode.offloads |=
677                         DEV_RX_OFFLOAD_JUMBO_FRAME;
678         else
679                 eth_dev->data->dev_conf.rxmode.offloads &=
680                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
681
682         err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
683                                &rxq->fl, NULL,
684                                is_pf4(adapter) ?
685                                t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp,
686                                queue_idx, socket_id);
687
688         dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\n",
689                   __func__, err, pi->port_id, rxq->rspq.cntxt_id,
690                   rxq->rspq.abs_id);
691         return err;
692 }
693
694 void cxgbe_dev_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
695 {
696         struct sge_eth_rxq *rxq = eth_dev->data->rx_queues[qid];
697
698         if (rxq) {
699                 struct port_info *pi = (struct port_info *)
700                                        (rxq->rspq.eth_dev->data->dev_private);
701                 struct adapter *adap = pi->adapter;
702
703                 dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
704                           __func__, pi->port_id, rxq->rspq.cntxt_id);
705
706                 t4_sge_eth_rxq_release(adap, rxq);
707         }
708 }
709
710 /*
711  * Get port statistics.
712  */
713 static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
714                                 struct rte_eth_stats *eth_stats)
715 {
716         struct port_info *pi = eth_dev->data->dev_private;
717         struct adapter *adapter = pi->adapter;
718         struct sge *s = &adapter->sge;
719         struct port_stats ps;
720         unsigned int i;
721
722         cxgbe_stats_get(pi, &ps);
723
724         /* RX Stats */
725         eth_stats->imissed  = ps.rx_ovflow0 + ps.rx_ovflow1 +
726                               ps.rx_ovflow2 + ps.rx_ovflow3 +
727                               ps.rx_trunc0 + ps.rx_trunc1 +
728                               ps.rx_trunc2 + ps.rx_trunc3;
729         eth_stats->ierrors  = ps.rx_symbol_err + ps.rx_fcs_err +
730                               ps.rx_jabber + ps.rx_too_long + ps.rx_runt +
731                               ps.rx_len_err;
732
733         /* TX Stats */
734         eth_stats->opackets = ps.tx_frames;
735         eth_stats->obytes   = ps.tx_octets;
736         eth_stats->oerrors  = ps.tx_error_frames;
737
738         for (i = 0; i < pi->n_rx_qsets; i++) {
739                 struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_rxqset + i];
740
741                 eth_stats->ipackets += rxq->stats.pkts;
742                 eth_stats->ibytes += rxq->stats.rx_bytes;
743         }
744
745         return 0;
746 }
747
748 /*
749  * Reset port statistics.
750  */
751 static int cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
752 {
753         struct port_info *pi = eth_dev->data->dev_private;
754         struct adapter *adapter = pi->adapter;
755         struct sge *s = &adapter->sge;
756         unsigned int i;
757
758         cxgbe_stats_reset(pi);
759         for (i = 0; i < pi->n_rx_qsets; i++) {
760                 struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_rxqset + i];
761
762                 memset(&rxq->stats, 0, sizeof(rxq->stats));
763         }
764         for (i = 0; i < pi->n_tx_qsets; i++) {
765                 struct sge_eth_txq *txq = &s->ethtxq[pi->first_txqset + i];
766
767                 memset(&txq->stats, 0, sizeof(txq->stats));
768         }
769
770         return 0;
771 }
772
/* Store extended statistics names and its offset in stats structure  */
struct cxgbe_dev_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];    /* xstats display name */
        unsigned int offset;    /* byte offset of the counter in its stats struct */
};
778
/* Per-Rx-queue xstats: names mapped to offsets in struct sge_eth_rx_stats */
static const struct cxgbe_dev_xstats_name_off cxgbe_dev_rxq_stats_strings[] = {
        {"packets", offsetof(struct sge_eth_rx_stats, pkts)},
        {"bytes", offsetof(struct sge_eth_rx_stats, rx_bytes)},
        {"checksum_offloads", offsetof(struct sge_eth_rx_stats, rx_cso)},
        {"vlan_extractions", offsetof(struct sge_eth_rx_stats, vlan_ex)},
        {"dropped_packets", offsetof(struct sge_eth_rx_stats, rx_drops)},
};
786
/* Per-Tx-queue xstats: names mapped to offsets in struct sge_eth_tx_stats */
static const struct cxgbe_dev_xstats_name_off cxgbe_dev_txq_stats_strings[] = {
        {"packets", offsetof(struct sge_eth_tx_stats, pkts)},
        {"bytes", offsetof(struct sge_eth_tx_stats, tx_bytes)},
        {"tso_requests", offsetof(struct sge_eth_tx_stats, tso)},
        {"checksum_offloads", offsetof(struct sge_eth_tx_stats, tx_cso)},
        {"vlan_insertions", offsetof(struct sge_eth_tx_stats, vlan_ins)},
        {"packet_mapping_errors",
         offsetof(struct sge_eth_tx_stats, mapping_err)},
        {"coalesced_wrs", offsetof(struct sge_eth_tx_stats, coal_wr)},
        {"coalesced_packets", offsetof(struct sge_eth_tx_stats, coal_pkts)},
};
798
799 static const struct cxgbe_dev_xstats_name_off cxgbe_dev_port_stats_strings[] = {
800         {"tx_bytes", offsetof(struct port_stats, tx_octets)},
801         {"tx_packets", offsetof(struct port_stats, tx_frames)},
802         {"tx_broadcast_packets", offsetof(struct port_stats, tx_bcast_frames)},
803         {"tx_multicast_packets", offsetof(struct port_stats, tx_mcast_frames)},
804         {"tx_unicast_packets", offsetof(struct port_stats, tx_ucast_frames)},
805         {"tx_error_packets", offsetof(struct port_stats, tx_error_frames)},
806         {"tx_size_64_packets", offsetof(struct port_stats, tx_frames_64)},
807         {"tx_size_65_to_127_packets",
808          offsetof(struct port_stats, tx_frames_65_127)},
809         {"tx_size_128_to_255_packets",
810          offsetof(struct port_stats, tx_frames_128_255)},
811         {"tx_size_256_to_511_packets",
812          offsetof(struct port_stats, tx_frames_256_511)},
813         {"tx_size_512_to_1023_packets",
814          offsetof(struct port_stats, tx_frames_512_1023)},
815         {"tx_size_1024_to_1518_packets",
816          offsetof(struct port_stats, tx_frames_1024_1518)},
817         {"tx_size_1519_to_max_packets",
818          offsetof(struct port_stats, tx_frames_1519_max)},
819         {"tx_drop_packets", offsetof(struct port_stats, tx_drop)},
820         {"tx_pause_frames", offsetof(struct port_stats, tx_pause)},
821         {"tx_ppp_pri0_packets", offsetof(struct port_stats, tx_ppp0)},
822         {"tx_ppp_pri1_packets", offsetof(struct port_stats, tx_ppp1)},
823         {"tx_ppp_pri2_packets", offsetof(struct port_stats, tx_ppp2)},
824         {"tx_ppp_pri3_packets", offsetof(struct port_stats, tx_ppp3)},
825         {"tx_ppp_pri4_packets", offsetof(struct port_stats, tx_ppp4)},
826         {"tx_ppp_pri5_packets", offsetof(struct port_stats, tx_ppp5)},
827         {"tx_ppp_pri6_packets", offsetof(struct port_stats, tx_ppp6)},
828         {"tx_ppp_pri7_packets", offsetof(struct port_stats, tx_ppp7)},
829         {"rx_bytes", offsetof(struct port_stats, rx_octets)},
830         {"rx_packets", offsetof(struct port_stats, rx_frames)},
831         {"rx_broadcast_packets", offsetof(struct port_stats, rx_bcast_frames)},
832         {"rx_multicast_packets", offsetof(struct port_stats, rx_mcast_frames)},
833         {"rx_unicast_packets", offsetof(struct port_stats, rx_ucast_frames)},
834         {"rx_too_long_packets", offsetof(struct port_stats, rx_too_long)},
835         {"rx_jabber_packets", offsetof(struct port_stats, rx_jabber)},
836         {"rx_fcs_error_packets", offsetof(struct port_stats, rx_fcs_err)},
837         {"rx_length_error_packets", offsetof(struct port_stats, rx_len_err)},
838         {"rx_symbol_error_packets",
839          offsetof(struct port_stats, rx_symbol_err)},
840         {"rx_short_packets", offsetof(struct port_stats, rx_runt)},
841         {"rx_size_64_packets", offsetof(struct port_stats, rx_frames_64)},
842         {"rx_size_65_to_127_packets",
843          offsetof(struct port_stats, rx_frames_65_127)},
844         {"rx_size_128_to_255_packets",
845          offsetof(struct port_stats, rx_frames_128_255)},
846         {"rx_size_256_to_511_packets",
847          offsetof(struct port_stats, rx_frames_256_511)},
848         {"rx_size_512_to_1023_packets",
849          offsetof(struct port_stats, rx_frames_512_1023)},
850         {"rx_size_1024_to_1518_packets",
851          offsetof(struct port_stats, rx_frames_1024_1518)},
852         {"rx_size_1519_to_max_packets",
853          offsetof(struct port_stats, rx_frames_1519_max)},
854         {"rx_pause_packets", offsetof(struct port_stats, rx_pause)},
855         {"rx_ppp_pri0_packets", offsetof(struct port_stats, rx_ppp0)},
856         {"rx_ppp_pri1_packets", offsetof(struct port_stats, rx_ppp1)},
857         {"rx_ppp_pri2_packets", offsetof(struct port_stats, rx_ppp2)},
858         {"rx_ppp_pri3_packets", offsetof(struct port_stats, rx_ppp3)},
859         {"rx_ppp_pri4_packets", offsetof(struct port_stats, rx_ppp4)},
860         {"rx_ppp_pri5_packets", offsetof(struct port_stats, rx_ppp5)},
861         {"rx_ppp_pri6_packets", offsetof(struct port_stats, rx_ppp6)},
862         {"rx_ppp_pri7_packets", offsetof(struct port_stats, rx_ppp7)},
863         {"rx_bg0_dropped_packets", offsetof(struct port_stats, rx_ovflow0)},
864         {"rx_bg1_dropped_packets", offsetof(struct port_stats, rx_ovflow1)},
865         {"rx_bg2_dropped_packets", offsetof(struct port_stats, rx_ovflow2)},
866         {"rx_bg3_dropped_packets", offsetof(struct port_stats, rx_ovflow3)},
867         {"rx_bg0_truncated_packets", offsetof(struct port_stats, rx_trunc0)},
868         {"rx_bg1_truncated_packets", offsetof(struct port_stats, rx_trunc1)},
869         {"rx_bg2_truncated_packets", offsetof(struct port_stats, rx_trunc2)},
870         {"rx_bg3_truncated_packets", offsetof(struct port_stats, rx_trunc3)},
871 };
872
/* Port-level xstats exposed for VFs: maps each xstat name to the byte
 * offset of its counter within struct port_stats.  VFs report only the
 * subset of counters filled in by cxgbevf_stats_get().
 */
static const struct cxgbe_dev_xstats_name_off
cxgbevf_dev_port_stats_strings[] = {
	{"tx_bytes", offsetof(struct port_stats, tx_octets)},
	{"tx_broadcast_packets", offsetof(struct port_stats, tx_bcast_frames)},
	{"tx_multicast_packets", offsetof(struct port_stats, tx_mcast_frames)},
	{"tx_unicast_packets", offsetof(struct port_stats, tx_ucast_frames)},
	{"tx_drop_packets", offsetof(struct port_stats, tx_drop)},
	{"rx_broadcast_packets", offsetof(struct port_stats, rx_bcast_frames)},
	{"rx_multicast_packets", offsetof(struct port_stats, rx_mcast_frames)},
	{"rx_unicast_packets", offsetof(struct port_stats, rx_ucast_frames)},
	{"rx_length_error_packets", offsetof(struct port_stats, rx_len_err)},
};
885
/* Number of entries in each of the xstats name tables above. */
#define CXGBE_NB_RXQ_STATS RTE_DIM(cxgbe_dev_rxq_stats_strings)
#define CXGBE_NB_TXQ_STATS RTE_DIM(cxgbe_dev_txq_stats_strings)
#define CXGBE_NB_PORT_STATS RTE_DIM(cxgbe_dev_port_stats_strings)
#define CXGBEVF_NB_PORT_STATS RTE_DIM(cxgbevf_dev_port_stats_strings)
890
891 static u16 cxgbe_dev_xstats_count(struct port_info *pi)
892 {
893         u16 count;
894
895         count = (pi->n_tx_qsets * CXGBE_NB_TXQ_STATS) +
896                 (pi->n_rx_qsets * CXGBE_NB_RXQ_STATS);
897
898         if (is_pf4(pi->adapter) != 0)
899                 count += CXGBE_NB_PORT_STATS;
900         else
901                 count += CXGBEVF_NB_PORT_STATS;
902
903         return count;
904 }
905
/* Shared worker for the xstats_get and xstats_get_names callbacks.
 *
 * Either @xstats_names or @xstats may be NULL, in which case that output
 * is skipped.  Entries are emitted in a fixed order - port-level stats,
 * then per-Tx-queue, then per-Rx-queue - so the IDs assigned here are
 * stable between the names and values variants of the call.
 *
 * Returns the number of entries filled, or the required count when
 * @size is too small to hold them all.
 */
static int cxgbe_dev_xstats(struct rte_eth_dev *dev,
			    struct rte_eth_xstat_name *xstats_names,
			    struct rte_eth_xstat *xstats, unsigned int size)
{
	const struct cxgbe_dev_xstats_name_off *xstats_str;
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adap = pi->adapter;
	struct sge *s = &adap->sge;
	u16 count, i, qid, nstats;
	struct port_stats ps;
	u64 *stats_ptr;

	count = cxgbe_dev_xstats_count(pi);
	if (size < count)
		return count;

	if (is_pf4(adap) != 0) {
		/* port stats for PF*/
		cxgbe_stats_get(pi, &ps);
		xstats_str = cxgbe_dev_port_stats_strings;
		nstats = CXGBE_NB_PORT_STATS;
	} else {
		/* port stats for VF*/
		cxgbevf_stats_get(pi, &ps);
		xstats_str = cxgbevf_dev_port_stats_strings;
		nstats = CXGBEVF_NB_PORT_STATS;
	}

	count = 0;
	for (i = 0; i < nstats; i++, count++) {
		if (xstats_names != NULL)
			snprintf(xstats_names[count].name,
				 sizeof(xstats_names[count].name),
				 "%s", xstats_str[i].name);
		if (xstats != NULL) {
			/* Table entries hold the byte offset of each
			 * counter within struct port_stats.
			 */
			stats_ptr = RTE_PTR_ADD(&ps,
						xstats_str[i].offset);
			xstats[count].value = *stats_ptr;
			xstats[count].id = count;
		}
	}

	/* per-txq stats */
	xstats_str = cxgbe_dev_txq_stats_strings;
	for (qid = 0; qid < pi->n_tx_qsets; qid++) {
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_txqset + qid];

		for (i = 0; i < CXGBE_NB_TXQ_STATS; i++, count++) {
			if (xstats_names != NULL)
				snprintf(xstats_names[count].name,
					 sizeof(xstats_names[count].name),
					 "tx_q%u_%s",
					 qid, xstats_str[i].name);
			if (xstats != NULL) {
				stats_ptr = RTE_PTR_ADD(&txq->stats,
							xstats_str[i].offset);
				xstats[count].value = *stats_ptr;
				xstats[count].id = count;
			}
		}
	}

	/* per-rxq stats */
	xstats_str = cxgbe_dev_rxq_stats_strings;
	for (qid = 0; qid < pi->n_rx_qsets; qid++) {
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_rxqset + qid];

		for (i = 0; i < CXGBE_NB_RXQ_STATS; i++, count++) {
			if (xstats_names != NULL)
				snprintf(xstats_names[count].name,
					 sizeof(xstats_names[count].name),
					 "rx_q%u_%s",
					 qid, xstats_str[i].name);
			if (xstats != NULL) {
				stats_ptr = RTE_PTR_ADD(&rxq->stats,
							xstats_str[i].offset);
				xstats[count].value = *stats_ptr;
				xstats[count].id = count;
			}
		}
	}

	return count;
}
990
991 /* Get port extended statistics by ID. */
992 int cxgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
993                                const uint64_t *ids, uint64_t *values,
994                                unsigned int n)
995 {
996         struct port_info *pi = dev->data->dev_private;
997         struct rte_eth_xstat *xstats_copy;
998         u16 count, i;
999         int ret = 0;
1000
1001         count = cxgbe_dev_xstats_count(pi);
1002         if (ids == NULL || values == NULL)
1003                 return count;
1004
1005         xstats_copy = rte_calloc(NULL, count, sizeof(*xstats_copy), 0);
1006         if (xstats_copy == NULL)
1007                 return -ENOMEM;
1008
1009         cxgbe_dev_xstats(dev, NULL, xstats_copy, count);
1010
1011         for (i = 0; i < n; i++) {
1012                 if (ids[i] >= count) {
1013                         ret = -EINVAL;
1014                         goto out_err;
1015                 }
1016                 values[i] = xstats_copy[ids[i]].value;
1017         }
1018
1019         ret = n;
1020
1021 out_err:
1022         rte_free(xstats_copy);
1023         return ret;
1024 }
1025
1026 /* Get names of port extended statistics by ID. */
1027 int cxgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1028                                             const uint64_t *ids,
1029                                             struct rte_eth_xstat_name *xnames,
1030                                             unsigned int n)
1031 {
1032         struct port_info *pi = dev->data->dev_private;
1033         struct rte_eth_xstat_name *xnames_copy;
1034         u16 count, i;
1035         int ret = 0;
1036
1037         count = cxgbe_dev_xstats_count(pi);
1038         if (ids == NULL || xnames == NULL)
1039                 return count;
1040
1041         xnames_copy = rte_calloc(NULL, count, sizeof(*xnames_copy), 0);
1042         if (xnames_copy == NULL)
1043                 return -ENOMEM;
1044
1045         cxgbe_dev_xstats(dev, xnames_copy, NULL, count);
1046
1047         for (i = 0; i < n; i++) {
1048                 if (ids[i] >= count) {
1049                         ret = -EINVAL;
1050                         goto out_err;
1051                 }
1052                 rte_strlcpy(xnames[i].name, xnames_copy[ids[i]].name,
1053                             sizeof(xnames[i].name));
1054         }
1055
1056         ret = n;
1057
1058 out_err:
1059         rte_free(xnames_copy);
1060         return ret;
1061 }
1062
/* Get port extended statistics.
 * Delegates to cxgbe_dev_xstats() with names=NULL so only values are filled.
 */
int cxgbe_dev_xstats_get(struct rte_eth_dev *dev,
			 struct rte_eth_xstat *xstats, unsigned int n)
{
	return cxgbe_dev_xstats(dev, NULL, xstats, n);
}
1069
/* Get names of port extended statistics.
 * Delegates to cxgbe_dev_xstats() with xstats=NULL so only names are filled.
 */
int cxgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
			       struct rte_eth_xstat_name *xstats_names,
			       unsigned int n)
{
	return cxgbe_dev_xstats(dev, xstats_names, NULL, n);
}
1077
/* Reset port extended statistics.
 * xstats share the same hardware counters as the basic stats, so a
 * basic stats reset clears them too.
 */
static int cxgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	return cxgbe_dev_stats_reset(dev);
}
1083
1084 static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
1085                                struct rte_eth_fc_conf *fc_conf)
1086 {
1087         struct port_info *pi = eth_dev->data->dev_private;
1088         struct link_config *lc = &pi->link_cfg;
1089         u8 rx_pause = 0, tx_pause = 0;
1090         u32 caps = lc->link_caps;
1091
1092         if (caps & FW_PORT_CAP32_ANEG)
1093                 fc_conf->autoneg = 1;
1094
1095         if (caps & FW_PORT_CAP32_FC_TX)
1096                 tx_pause = 1;
1097
1098         if (caps & FW_PORT_CAP32_FC_RX)
1099                 rx_pause = 1;
1100
1101         if (rx_pause && tx_pause)
1102                 fc_conf->mode = RTE_FC_FULL;
1103         else if (rx_pause)
1104                 fc_conf->mode = RTE_FC_RX_PAUSE;
1105         else if (tx_pause)
1106                 fc_conf->mode = RTE_FC_TX_PAUSE;
1107         else
1108                 fc_conf->mode = RTE_FC_NONE;
1109         return 0;
1110 }
1111
1112 static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
1113                                struct rte_eth_fc_conf *fc_conf)
1114 {
1115         struct port_info *pi = eth_dev->data->dev_private;
1116         struct link_config *lc = &pi->link_cfg;
1117         u32 new_caps = lc->admin_caps;
1118         u8 tx_pause = 0, rx_pause = 0;
1119         int ret;
1120
1121         if (fc_conf->mode == RTE_FC_FULL) {
1122                 tx_pause = 1;
1123                 rx_pause = 1;
1124         } else if (fc_conf->mode == RTE_FC_TX_PAUSE) {
1125                 tx_pause = 1;
1126         } else if (fc_conf->mode == RTE_FC_RX_PAUSE) {
1127                 rx_pause = 1;
1128         }
1129
1130         ret = t4_set_link_pause(pi, fc_conf->autoneg, tx_pause,
1131                                 rx_pause, &new_caps);
1132         if (ret != 0)
1133                 return ret;
1134
1135         if (!fc_conf->autoneg) {
1136                 if (lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)
1137                         new_caps |= FW_PORT_CAP32_FORCE_PAUSE;
1138         } else {
1139                 new_caps &= ~FW_PORT_CAP32_FORCE_PAUSE;
1140         }
1141
1142         if (new_caps != lc->admin_caps) {
1143                 ret = t4_link_l1cfg(pi, new_caps);
1144                 if (ret == 0)
1145                         lc->admin_caps = new_caps;
1146         }
1147
1148         return ret;
1149 }
1150
1151 const uint32_t *
1152 cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
1153 {
1154         static const uint32_t ptypes[] = {
1155                 RTE_PTYPE_L3_IPV4,
1156                 RTE_PTYPE_L3_IPV6,
1157                 RTE_PTYPE_UNKNOWN
1158         };
1159
1160         if (eth_dev->rx_pkt_burst == cxgbe_recv_pkts)
1161                 return ptypes;
1162         return NULL;
1163 }
1164
1165 /* Update RSS hash configuration
1166  */
1167 static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
1168                                      struct rte_eth_rss_conf *rss_conf)
1169 {
1170         struct port_info *pi = dev->data->dev_private;
1171         struct adapter *adapter = pi->adapter;
1172         int err;
1173
1174         err = cxgbe_write_rss_conf(pi, rss_conf->rss_hf);
1175         if (err)
1176                 return err;
1177
1178         pi->rss_hf = rss_conf->rss_hf;
1179
1180         if (rss_conf->rss_key) {
1181                 u32 key[10], mod_key[10];
1182                 int i, j;
1183
1184                 memcpy(key, rss_conf->rss_key, CXGBE_DEFAULT_RSS_KEY_LEN);
1185
1186                 for (i = 9, j = 0; i >= 0; i--, j++)
1187                         mod_key[j] = cpu_to_be32(key[i]);
1188
1189                 t4_write_rss_key(adapter, mod_key, -1);
1190         }
1191
1192         return 0;
1193 }
1194
1195 /* Get RSS hash configuration
1196  */
1197 static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1198                                        struct rte_eth_rss_conf *rss_conf)
1199 {
1200         struct port_info *pi = dev->data->dev_private;
1201         struct adapter *adapter = pi->adapter;
1202         u64 rss_hf = 0;
1203         u64 flags = 0;
1204         int err;
1205
1206         err = t4_read_config_vi_rss(adapter, adapter->mbox, pi->viid,
1207                                     &flags, NULL);
1208
1209         if (err)
1210                 return err;
1211
1212         if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) {
1213                 rss_hf |= CXGBE_RSS_HF_TCP_IPV6_MASK;
1214                 if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
1215                         rss_hf |= CXGBE_RSS_HF_UDP_IPV6_MASK;
1216         }
1217
1218         if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1219                 rss_hf |= CXGBE_RSS_HF_IPV6_MASK;
1220
1221         if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
1222                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1223                 if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
1224                         rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1225         }
1226
1227         if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1228                 rss_hf |= CXGBE_RSS_HF_IPV4_MASK;
1229
1230         rss_conf->rss_hf = rss_hf;
1231
1232         if (rss_conf->rss_key) {
1233                 u32 key[10], mod_key[10];
1234                 int i, j;
1235
1236                 t4_read_rss_key(adapter, key);
1237
1238                 for (i = 9, j = 0; i >= 0; i--, j++)
1239                         mod_key[j] = be32_to_cpu(key[i]);
1240
1241                 memcpy(rss_conf->rss_key, mod_key, CXGBE_DEFAULT_RSS_KEY_LEN);
1242         }
1243
1244         return 0;
1245 }
1246
1247 static int cxgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
1248                                      struct rte_eth_rss_reta_entry64 *reta_conf,
1249                                      uint16_t reta_size)
1250 {
1251         struct port_info *pi = dev->data->dev_private;
1252         struct adapter *adapter = pi->adapter;
1253         u16 i, idx, shift, *rss;
1254         int ret;
1255
1256         if (!(adapter->flags & FULL_INIT_DONE))
1257                 return -ENOMEM;
1258
1259         if (!reta_size || reta_size > pi->rss_size)
1260                 return -EINVAL;
1261
1262         rss = rte_calloc(NULL, pi->rss_size, sizeof(u16), 0);
1263         if (!rss)
1264                 return -ENOMEM;
1265
1266         rte_memcpy(rss, pi->rss, pi->rss_size * sizeof(u16));
1267         for (i = 0; i < reta_size; i++) {
1268                 idx = i / RTE_RETA_GROUP_SIZE;
1269                 shift = i % RTE_RETA_GROUP_SIZE;
1270                 if (!(reta_conf[idx].mask & (1ULL << shift)))
1271                         continue;
1272
1273                 rss[i] = reta_conf[idx].reta[shift];
1274         }
1275
1276         ret = cxgbe_write_rss(pi, rss);
1277         if (!ret)
1278                 rte_memcpy(pi->rss, rss, pi->rss_size * sizeof(u16));
1279
1280         rte_free(rss);
1281         return ret;
1282 }
1283
1284 static int cxgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
1285                                     struct rte_eth_rss_reta_entry64 *reta_conf,
1286                                     uint16_t reta_size)
1287 {
1288         struct port_info *pi = dev->data->dev_private;
1289         struct adapter *adapter = pi->adapter;
1290         u16 i, idx, shift;
1291
1292         if (!(adapter->flags & FULL_INIT_DONE))
1293                 return -ENOMEM;
1294
1295         if (!reta_size || reta_size > pi->rss_size)
1296                 return -EINVAL;
1297
1298         for (i = 0; i < reta_size; i++) {
1299                 idx = i / RTE_RETA_GROUP_SIZE;
1300                 shift = i % RTE_RETA_GROUP_SIZE;
1301                 if (!(reta_conf[idx].mask & (1ULL << shift)))
1302                         continue;
1303
1304                 reta_conf[idx].reta[shift] = pi->rss[i];
1305         }
1306
1307         return 0;
1308 }
1309
/* Report the size in bytes of the serial EEPROM. */
static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
{
	RTE_SET_USED(dev);
	return EEPROMSIZE;
}
1315
1316 /**
1317  * eeprom_ptov - translate a physical EEPROM address to virtual
1318  * @phys_addr: the physical EEPROM address
1319  * @fn: the PCI function number
1320  * @sz: size of function-specific area
1321  *
1322  * Translate a physical EEPROM address to virtual.  The first 1K is
1323  * accessed through virtual addresses starting at 31K, the rest is
1324  * accessed through virtual addresses starting at 0.
1325  *
1326  * The mapping is as follows:
1327  * [0..1K) -> [31K..32K)
1328  * [1K..1K+A) -> [31K-A..31K)
1329  * [1K+A..ES) -> [0..ES-A-1K)
1330  *
1331  * where A = @fn * @sz, and ES = EEPROM size.
1332  */
1333 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
1334 {
1335         fn *= sz;
1336         if (phys_addr < 1024)
1337                 return phys_addr + (31 << 10);
1338         if (phys_addr < 1024 + fn)
1339                 return fn + phys_addr - 1024;
1340         if (phys_addr < EEPROMSIZE)
1341                 return phys_addr - 1024 - fn;
1342         if (phys_addr < EEPROMVSIZE)
1343                 return phys_addr - 1024;
1344         return -EINVAL;
1345 }
1346
1347 /* The next two routines implement eeprom read/write from physical addresses.
1348  */
1349 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1350 {
1351         int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
1352
1353         if (vaddr >= 0)
1354                 vaddr = t4_seeprom_read(adap, vaddr, v);
1355         return vaddr < 0 ? vaddr : 0;
1356 }
1357
1358 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1359 {
1360         int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
1361
1362         if (vaddr >= 0)
1363                 vaddr = t4_seeprom_write(adap, vaddr, v);
1364         return vaddr < 0 ? vaddr : 0;
1365 }
1366
1367 #define EEPROM_MAGIC 0x38E2F10C
1368
1369 static int cxgbe_get_eeprom(struct rte_eth_dev *dev,
1370                             struct rte_dev_eeprom_info *e)
1371 {
1372         struct port_info *pi = dev->data->dev_private;
1373         struct adapter *adapter = pi->adapter;
1374         u32 i, err = 0;
1375         u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0);
1376
1377         if (!buf)
1378                 return -ENOMEM;
1379
1380         e->magic = EEPROM_MAGIC;
1381         for (i = e->offset & ~3; !err && i < e->offset + e->length; i += 4)
1382                 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
1383
1384         if (!err)
1385                 rte_memcpy(e->data, buf + e->offset, e->length);
1386         rte_free(buf);
1387         return err;
1388 }
1389
/* Write a caller-supplied byte range into the serial EEPROM.
 *
 * EEPROM writes operate on 32-bit words, so an unaligned first or last
 * word is handled by read-modify-write through a temporary buffer.
 * PFs other than PF0 may only write within their own EEPROMPFSIZE slice
 * of the EEPROM.
 */
static int cxgbe_set_eeprom(struct rte_eth_dev *dev,
			    struct rte_dev_eeprom_info *eeprom)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;

	/* Reject writes unless the caller echoes the magic returned by
	 * cxgbe_get_eeprom().
	 */
	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->length + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->pf > 0) {
		u32 start = 1024 + adapter->pf * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->length) {
		/* RMW possibly needed for first or last words.
		 */
		buf = rte_zmalloc(NULL, aligned_len, 0);
		if (!buf)
			return -ENOMEM;
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		rte_memcpy(buf + (eeprom->offset & 3), eeprom->data,
			   eeprom->length);
	} else {
		/* Already word-aligned: write straight from the caller's
		 * buffer, no scratch copy needed.
		 */
		buf = eeprom->data;
	}

	/* Lift write protection for the duration of the update. */
	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	/* Free the scratch buffer only when one was allocated. */
	if (buf != eeprom->data)
		rte_free(buf);
	return err;
}
1448
1449 static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev)
1450 {
1451         struct port_info *pi = eth_dev->data->dev_private;
1452         struct adapter *adapter = pi->adapter;
1453
1454         return t4_get_regs_len(adapter) / sizeof(uint32_t);
1455 }
1456
1457 static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
1458                           struct rte_dev_reg_info *regs)
1459 {
1460         struct port_info *pi = eth_dev->data->dev_private;
1461         struct adapter *adapter = pi->adapter;
1462
1463         regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) |
1464                 (CHELSIO_CHIP_RELEASE(adapter->params.chip) << 10) |
1465                 (1 << 16);
1466
1467         if (regs->data == NULL) {
1468                 regs->length = cxgbe_get_regs_len(eth_dev);
1469                 regs->width = sizeof(uint32_t);
1470
1471                 return 0;
1472         }
1473
1474         t4_get_regs(adapter, regs->data, (regs->length * sizeof(uint32_t)));
1475
1476         return 0;
1477 }
1478
1479 int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
1480 {
1481         struct port_info *pi = dev->data->dev_private;
1482         int ret;
1483
1484         ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt, (u8 *)addr);
1485         if (ret < 0) {
1486                 dev_err(adapter, "failed to set mac addr; err = %d\n",
1487                         ret);
1488                 return ret;
1489         }
1490         pi->xact_addr_filt = ret;
1491         return 0;
1492 }
1493
1494 static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
1495                                            struct rte_eth_fec_capa *capa_arr)
1496 {
1497         int num = 0;
1498
1499         if (lc->pcaps & FW_PORT_CAP32_SPEED_100G) {
1500                 if (capa_arr) {
1501                         capa_arr[num].speed = ETH_SPEED_NUM_100G;
1502                         capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
1503                                              RTE_ETH_FEC_MODE_CAPA_MASK(RS);
1504                 }
1505                 num++;
1506         }
1507
1508         if (lc->pcaps & FW_PORT_CAP32_SPEED_50G) {
1509                 if (capa_arr) {
1510                         capa_arr[num].speed = ETH_SPEED_NUM_50G;
1511                         capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
1512                                              RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
1513                 }
1514                 num++;
1515         }
1516
1517         if (lc->pcaps & FW_PORT_CAP32_SPEED_25G) {
1518                 if (capa_arr) {
1519                         capa_arr[num].speed = ETH_SPEED_NUM_25G;
1520                         capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
1521                                              RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
1522                                              RTE_ETH_FEC_MODE_CAPA_MASK(RS);
1523                 }
1524                 num++;
1525         }
1526
1527         return num;
1528 }
1529
1530 static int cxgbe_fec_get_capability(struct rte_eth_dev *dev,
1531                                     struct rte_eth_fec_capa *speed_fec_capa,
1532                                     unsigned int num)
1533 {
1534         struct port_info *pi = dev->data->dev_private;
1535         struct link_config *lc = &pi->link_cfg;
1536         u8 num_entries;
1537
1538         if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
1539                 return -EOPNOTSUPP;
1540
1541         num_entries = cxgbe_fec_get_capa_speed_to_fec(lc, NULL);
1542         if (!speed_fec_capa || num < num_entries)
1543                 return num_entries;
1544
1545         return cxgbe_fec_get_capa_speed_to_fec(lc, speed_fec_capa);
1546 }
1547
1548 static int cxgbe_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
1549 {
1550         struct port_info *pi = dev->data->dev_private;
1551         struct link_config *lc = &pi->link_cfg;
1552         u32 fec_caps = 0, caps = lc->link_caps;
1553
1554         if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
1555                 return -EOPNOTSUPP;
1556
1557         if (caps & FW_PORT_CAP32_FEC_RS)
1558                 fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
1559         else if (caps & FW_PORT_CAP32_FEC_BASER_RS)
1560                 fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
1561         else
1562                 fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
1563
1564         *fec_capa = fec_caps;
1565         return 0;
1566 }
1567
1568 static int cxgbe_fec_set(struct rte_eth_dev *dev, uint32_t fec_capa)
1569 {
1570         struct port_info *pi = dev->data->dev_private;
1571         u8 fec_rs = 0, fec_baser = 0, fec_none = 0;
1572         struct link_config *lc = &pi->link_cfg;
1573         u32 new_caps = lc->admin_caps;
1574         int ret;
1575
1576         if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
1577                 return -EOPNOTSUPP;
1578
1579         if (!fec_capa)
1580                 return -EINVAL;
1581
1582         if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(AUTO))
1583                 goto set_fec;
1584
1585         if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC))
1586                 fec_none = 1;
1587
1588         if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(BASER))
1589                 fec_baser = 1;
1590
1591         if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(RS))
1592                 fec_rs = 1;
1593
1594 set_fec:
1595         ret = t4_set_link_fec(pi, fec_rs, fec_baser, fec_none, &new_caps);
1596         if (ret != 0)
1597                 return ret;
1598
1599         if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC)
1600                 new_caps |= FW_PORT_CAP32_FORCE_FEC;
1601         else
1602                 new_caps &= ~FW_PORT_CAP32_FORCE_FEC;
1603
1604         if (new_caps != lc->admin_caps) {
1605                 ret = t4_link_l1cfg(pi, new_caps);
1606                 if (ret == 0)
1607                         lc->admin_caps = new_caps;
1608         }
1609
1610         return ret;
1611 }
1612
1613 int cxgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
1614                          size_t fw_size)
1615 {
1616         struct port_info *pi = dev->data->dev_private;
1617         struct adapter *adapter = pi->adapter;
1618         int ret;
1619
1620         if (adapter->params.fw_vers == 0)
1621                 return -EIO;
1622
1623         ret = snprintf(fw_version, fw_size, "%u.%u.%u.%u",
1624                        G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
1625                        G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
1626                        G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
1627                        G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));
1628         if (ret < 0)
1629                 return -EINVAL;
1630
1631         ret += 1;
1632         if (fw_size < (size_t)ret)
1633                 return ret;
1634
1635         return 0;
1636 }
1637
/* ethdev callback table registered for devices probed by this driver. */
static const struct eth_dev_ops cxgbe_eth_dev_ops = {
	.dev_start              = cxgbe_dev_start,
	.dev_stop               = cxgbe_dev_stop,
	.dev_close              = cxgbe_dev_close,
	.promiscuous_enable     = cxgbe_dev_promiscuous_enable,
	.promiscuous_disable    = cxgbe_dev_promiscuous_disable,
	.allmulticast_enable    = cxgbe_dev_allmulticast_enable,
	.allmulticast_disable   = cxgbe_dev_allmulticast_disable,
	.dev_configure          = cxgbe_dev_configure,
	.dev_infos_get          = cxgbe_dev_info_get,
	.dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
	.link_update            = cxgbe_dev_link_update,
	.dev_set_link_up        = cxgbe_dev_set_link_up,
	.dev_set_link_down      = cxgbe_dev_set_link_down,
	.mtu_set                = cxgbe_dev_mtu_set,
	.tx_queue_setup         = cxgbe_dev_tx_queue_setup,
	.tx_queue_start         = cxgbe_dev_tx_queue_start,
	.tx_queue_stop          = cxgbe_dev_tx_queue_stop,
	.tx_queue_release       = cxgbe_dev_tx_queue_release,
	.rx_queue_setup         = cxgbe_dev_rx_queue_setup,
	.rx_queue_start         = cxgbe_dev_rx_queue_start,
	.rx_queue_stop          = cxgbe_dev_rx_queue_stop,
	.rx_queue_release       = cxgbe_dev_rx_queue_release,
	.flow_ops_get           = cxgbe_dev_flow_ops_get,
	.stats_get              = cxgbe_dev_stats_get,
	.stats_reset            = cxgbe_dev_stats_reset,
	.xstats_get             = cxgbe_dev_xstats_get,
	.xstats_get_by_id       = cxgbe_dev_xstats_get_by_id,
	.xstats_get_names       = cxgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = cxgbe_dev_xstats_get_names_by_id,
	.xstats_reset           = cxgbe_dev_xstats_reset,
	.flow_ctrl_get          = cxgbe_flow_ctrl_get,
	.flow_ctrl_set          = cxgbe_flow_ctrl_set,
	.get_eeprom_length      = cxgbe_get_eeprom_length,
	.get_eeprom             = cxgbe_get_eeprom,
	.set_eeprom             = cxgbe_set_eeprom,
	.get_reg                = cxgbe_get_regs,
	.rss_hash_update        = cxgbe_dev_rss_hash_update,
	.rss_hash_conf_get      = cxgbe_dev_rss_hash_conf_get,
	.mac_addr_set           = cxgbe_mac_addr_set,
	.reta_update            = cxgbe_dev_rss_reta_update,
	.reta_query             = cxgbe_dev_rss_reta_query,
	.fec_get_capability     = cxgbe_fec_get_capability,
	.fec_get                = cxgbe_fec_get,
	.fec_set                = cxgbe_fec_set,
	.fw_version_get         = cxgbe_fw_version_get,
};
1685
1686 /*
1687  * Initialize driver
1688  * It returns 0 on success.
1689  */
1690 static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
1691 {
1692         struct rte_pci_device *pci_dev;
1693         struct port_info *pi = eth_dev->data->dev_private;
1694         struct adapter *adapter = NULL;
1695         char name[RTE_ETH_NAME_MAX_LEN];
1696         int err = 0;
1697
1698         CXGBE_FUNC_TRACE();
1699
1700         eth_dev->dev_ops = &cxgbe_eth_dev_ops;
1701         eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
1702         eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
1703         pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1704
1705         /* for secondary processes, we attach to ethdevs allocated by primary
1706          * and do minimal initialization.
1707          */
1708         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1709                 int i;
1710
1711                 for (i = 1; i < MAX_NPORTS; i++) {
1712                         struct rte_eth_dev *rest_eth_dev;
1713                         char namei[RTE_ETH_NAME_MAX_LEN];
1714
1715                         snprintf(namei, sizeof(namei), "%s_%d",
1716                                  pci_dev->device.name, i);
1717                         rest_eth_dev = rte_eth_dev_attach_secondary(namei);
1718                         if (rest_eth_dev) {
1719                                 rest_eth_dev->device = &pci_dev->device;
1720                                 rest_eth_dev->dev_ops =
1721                                         eth_dev->dev_ops;
1722                                 rest_eth_dev->rx_pkt_burst =
1723                                         eth_dev->rx_pkt_burst;
1724                                 rest_eth_dev->tx_pkt_burst =
1725                                         eth_dev->tx_pkt_burst;
1726                                 rte_eth_dev_probing_finish(rest_eth_dev);
1727                         }
1728                 }
1729                 return 0;
1730         }
1731
1732         snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
1733         adapter = rte_zmalloc(name, sizeof(*adapter), 0);
1734         if (!adapter)
1735                 return -1;
1736
1737         adapter->use_unpacked_mode = 1;
1738         adapter->regs = (void *)pci_dev->mem_resource[0].addr;
1739         if (!adapter->regs) {
1740                 dev_err(adapter, "%s: cannot map device registers\n", __func__);
1741                 err = -ENOMEM;
1742                 goto out_free_adapter;
1743         }
1744         adapter->pdev = pci_dev;
1745         adapter->eth_dev = eth_dev;
1746         pi->adapter = adapter;
1747
1748         cxgbe_process_devargs(adapter);
1749
1750         err = cxgbe_probe(adapter);
1751         if (err) {
1752                 dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
1753                         __func__, err);
1754                 goto out_free_adapter;
1755         }
1756
1757         return 0;
1758
1759 out_free_adapter:
1760         rte_free(adapter);
1761         return err;
1762 }
1763
1764 static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev)
1765 {
1766         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1767         uint16_t port_id;
1768         int err = 0;
1769
1770         /* Free up other ports and all resources */
1771         RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
1772                 err |= rte_eth_dev_close(port_id);
1773
1774         return err == 0 ? 0 : -EIO;
1775 }
1776
1777 static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1778         struct rte_pci_device *pci_dev)
1779 {
1780         return rte_eth_dev_pci_generic_probe(pci_dev,
1781                 sizeof(struct port_info), eth_cxgbe_dev_init);
1782 }
1783
1784 static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
1785 {
1786         return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbe_dev_uninit);
1787 }
1788
/* PCI driver descriptor: supported device IDs plus probe/remove hooks.
 * RTE_PCI_DRV_NEED_MAPPING asks the PCI bus to map BARs before probe.
 */
static struct rte_pci_driver rte_cxgbe_pmd = {
	.id_table = cxgb4_pci_tbl,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_cxgbe_pci_probe,
	.remove = eth_cxgbe_pci_remove,
};
1795
/* Register the PMD with the PCI bus, export its device ID table and
 * kernel-module requirements, declare the supported devargs, and set
 * up the driver's two log types (general and mailbox).
 */
RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
			      CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
			      CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> "
			      CXGBE_DEVARG_PF_FILTER_MODE "=<uint32> "
			      CXGBE_DEVARG_PF_FILTER_MASK "=<uint32> ");
RTE_LOG_REGISTER_DEFAULT(cxgbe_logtype, NOTICE);
RTE_LOG_REGISTER_SUFFIX(cxgbe_mbox_logtype, mbox, NOTICE);