458111ae5b160bca966906f47848346ee04fee0c
[dpdk.git] / drivers / net / cxgbe / cxgbe_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2018 Chelsio Communications.
3  * All rights reserved.
4  */
5
6 #include <sys/queue.h>
7 #include <stdio.h>
8 #include <errno.h>
9 #include <stdint.h>
10 #include <string.h>
11 #include <unistd.h>
12 #include <stdarg.h>
13 #include <inttypes.h>
14 #include <netinet/in.h>
15
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
18 #include <rte_cycles.h>
19 #include <rte_interrupts.h>
20 #include <rte_log.h>
21 #include <rte_debug.h>
22 #include <rte_pci.h>
23 #include <rte_bus_pci.h>
24 #include <rte_branch_prediction.h>
25 #include <rte_memory.h>
26 #include <rte_tailq.h>
27 #include <rte_eal.h>
28 #include <rte_alarm.h>
29 #include <rte_ether.h>
30 #include <ethdev_driver.h>
31 #include <ethdev_pci.h>
32 #include <rte_malloc.h>
33 #include <rte_random.h>
34 #include <rte_dev.h>
35
36 #include "cxgbe.h"
37 #include "cxgbe_pfvf.h"
38 #include "cxgbe_flow.h"
39
40 /*
41  * Macros needed to support the PCI Device ID Table ...
42  */
43 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
44         static const struct rte_pci_id cxgb4_pci_tbl[] = {
45 #define CH_PCI_DEVICE_ID_FUNCTION 0x4
46
47 #define PCI_VENDOR_ID_CHELSIO 0x1425
48
49 #define CH_PCI_ID_TABLE_ENTRY(devid) \
50                 { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }
51
52 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
53                 { .vendor_id = 0, } \
54         }
55
56 /*
57  *... and the PCI ID Table itself ...
58  */
59 #include "base/t4_pci_id_tbl.h"
60
/*
 * Burst transmit handler: hand up to nb_pkts mbufs to the hardware Tx
 * queue. Returns the number of packets actually queued; any mbufs not
 * sent remain owned by the caller.
 */
uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			 uint16_t nb_pkts)
{
	struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
	uint16_t pkts_sent, pkts_remain;
	uint16_t total_sent = 0;
	uint16_t idx = 0;
	int ret = 0;

	/* Serialize transmit on this queue */
	t4_os_lock(&txq->txq_lock);
	/* free up desc from already completed tx */
	reclaim_completed_tx(&txq->q);
	if (unlikely(!nb_pkts))
		goto out_unlock;

	rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[0], volatile void *));
	while (total_sent < nb_pkts) {
		pkts_remain = nb_pkts - total_sent;

		for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
			idx = total_sent + pkts_sent;
			/* Prefetch the next mbuf's data to hide memory
			 * latency on the following iteration.
			 */
			if ((idx + 1) < nb_pkts)
				rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[idx + 1],
							volatile void *));
			ret = t4_eth_xmit(txq, tx_pkts[idx], nb_pkts);
			if (ret < 0)
				break;
		}
		/* No forward progress at all in this pass - give up;
		 * otherwise reclaim completed descriptors and retry the
		 * remaining packets.
		 */
		if (!pkts_sent)
			break;
		total_sent += pkts_sent;
		/* reclaim as much as possible */
		reclaim_completed_tx(&txq->q);
	}

out_unlock:
	t4_os_unlock(&txq->txq_lock);
	return total_sent;
}
100
101 uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
102                          uint16_t nb_pkts)
103 {
104         struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
105         unsigned int work_done;
106
107         if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
108                 dev_err(adapter, "error in cxgbe poll\n");
109
110         return work_done;
111 }
112
113 int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
114                         struct rte_eth_dev_info *device_info)
115 {
116         struct port_info *pi = eth_dev->data->dev_private;
117         struct adapter *adapter = pi->adapter;
118
119         static const struct rte_eth_desc_lim cxgbe_desc_lim = {
120                 .nb_max = CXGBE_MAX_RING_DESC_SIZE,
121                 .nb_min = CXGBE_MIN_RING_DESC_SIZE,
122                 .nb_align = 1,
123         };
124
125         device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
126         device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
127         device_info->max_rx_queues = adapter->sge.max_ethqsets;
128         device_info->max_tx_queues = adapter->sge.max_ethqsets;
129         device_info->max_mac_addrs = 1;
130         /* XXX: For now we support one MAC/port */
131         device_info->max_vfs = adapter->params.arch.vfcount;
132         device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */
133
134         device_info->rx_queue_offload_capa = 0UL;
135         device_info->rx_offload_capa = CXGBE_RX_OFFLOADS;
136
137         device_info->tx_queue_offload_capa = 0UL;
138         device_info->tx_offload_capa = CXGBE_TX_OFFLOADS;
139
140         device_info->reta_size = pi->rss_size;
141         device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
142         device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL;
143
144         device_info->rx_desc_lim = cxgbe_desc_lim;
145         device_info->tx_desc_lim = cxgbe_desc_lim;
146         cxgbe_get_speed_caps(pi, &device_info->speed_capa);
147
148         return 0;
149 }
150
151 int cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
152 {
153         struct port_info *pi = eth_dev->data->dev_private;
154         struct adapter *adapter = pi->adapter;
155         int ret;
156
157         if (adapter->params.rawf_size != 0) {
158                 ret = cxgbe_mpstcam_rawf_enable(pi);
159                 if (ret < 0)
160                         return ret;
161         }
162
163         return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
164                              1, -1, 1, -1, false);
165 }
166
167 int cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
168 {
169         struct port_info *pi = eth_dev->data->dev_private;
170         struct adapter *adapter = pi->adapter;
171         int ret;
172
173         if (adapter->params.rawf_size != 0) {
174                 ret = cxgbe_mpstcam_rawf_disable(pi);
175                 if (ret < 0)
176                         return ret;
177         }
178
179         return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
180                              0, -1, 1, -1, false);
181 }
182
183 int cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
184 {
185         struct port_info *pi = eth_dev->data->dev_private;
186         struct adapter *adapter = pi->adapter;
187
188         /* TODO: address filters ?? */
189
190         return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
191                              -1, 1, 1, -1, false);
192 }
193
194 int cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
195 {
196         struct port_info *pi = eth_dev->data->dev_private;
197         struct adapter *adapter = pi->adapter;
198
199         /* TODO: address filters ?? */
200
201         return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
202                              -1, 0, 1, -1, false);
203 }
204
/*
 * Poll the firmware event queue for link-state changes and publish the
 * current link status (speed, duplex, autoneg) to the ethdev layer.
 */
int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
			  int wait_to_complete)
{
	struct port_info *pi = eth_dev->data->dev_private;
	unsigned int i, work_done, budget = 32;
	struct link_config *lc = &pi->link_cfg;
	struct adapter *adapter = pi->adapter;
	struct rte_eth_link new_link = { 0 };
	u8 old_link = pi->link_cfg.link_ok;
	struct sge *s = &adapter->sge;

	for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
		/* Firmware event queue not set up yet - nothing to poll */
		if (!s->fw_evtq.desc)
			break;

		/* Process pending firmware events; link updates arrive here */
		cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

		/* Exit if link status changed or always forced up */
		if (pi->link_cfg.link_ok != old_link ||
		    cxgbe_force_linkup(adapter))
			break;

		/* Single-shot query when the caller does not want to wait */
		if (!wait_to_complete)
			break;

		rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
	}

	new_link.link_status = cxgbe_force_linkup(adapter) ?
			       ETH_LINK_UP : pi->link_cfg.link_ok;
	new_link.link_autoneg = (lc->link_caps & FW_PORT_CAP32_ANEG) ? 1 : 0;
	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	new_link.link_speed = t4_fwcap_to_speed(lc->link_caps);

	return rte_eth_linkstatus_set(eth_dev, &new_link);
}
241
242 /**
243  * Set device link up.
244  */
245 int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
246 {
247         struct port_info *pi = dev->data->dev_private;
248         struct adapter *adapter = pi->adapter;
249         unsigned int work_done, budget = 32;
250         struct sge *s = &adapter->sge;
251         int ret;
252
253         if (!s->fw_evtq.desc)
254                 return -ENOMEM;
255
256         /* Flush all link events */
257         cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
258
259         /* If link already up, nothing to do */
260         if (pi->link_cfg.link_ok)
261                 return 0;
262
263         ret = cxgbe_set_link_status(pi, true);
264         if (ret)
265                 return ret;
266
267         cxgbe_dev_link_update(dev, 1);
268         return 0;
269 }
270
271 /**
272  * Set device link down.
273  */
274 int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
275 {
276         struct port_info *pi = dev->data->dev_private;
277         struct adapter *adapter = pi->adapter;
278         unsigned int work_done, budget = 32;
279         struct sge *s = &adapter->sge;
280         int ret;
281
282         if (!s->fw_evtq.desc)
283                 return -ENOMEM;
284
285         /* Flush all link events */
286         cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
287
288         /* If link already down, nothing to do */
289         if (!pi->link_cfg.link_ok)
290                 return 0;
291
292         ret = cxgbe_set_link_status(pi, false);
293         if (ret)
294                 return ret;
295
296         cxgbe_dev_link_update(dev, 0);
297         return 0;
298 }
299
300 int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
301 {
302         struct port_info *pi = eth_dev->data->dev_private;
303         struct adapter *adapter = pi->adapter;
304         struct rte_eth_dev_info dev_info;
305         int err;
306         uint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
307
308         err = cxgbe_dev_info_get(eth_dev, &dev_info);
309         if (err != 0)
310                 return err;
311
312         /* Must accommodate at least RTE_ETHER_MIN_MTU */
313         if (mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
314                 return -EINVAL;
315
316         /* set to jumbo mode if needed */
317         if (mtu > RTE_ETHER_MTU)
318                 eth_dev->data->dev_conf.rxmode.offloads |=
319                         DEV_RX_OFFLOAD_JUMBO_FRAME;
320         else
321                 eth_dev->data->dev_conf.rxmode.offloads &=
322                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
323
324         err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
325                             -1, -1, true);
326         return err;
327 }
328
/*
 * Close the device: release this port's virtual interface and queues.
 * Adapter-wide state is torn down only once the last port under this
 * PF has been closed.
 */
int cxgbe_dev_close(struct rte_eth_dev *eth_dev)
{
	struct port_info *temp_pi, *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u8 i;

	CXGBE_FUNC_TRACE();

	/* Only the primary process releases hardware resources */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Nothing was brought up, so nothing to tear down */
	if (!(adapter->flags & FULL_INIT_DONE))
		return 0;

	/* Port already closed (viid released) */
	if (!pi->viid)
		return 0;

	cxgbe_down(pi);
	t4_sge_eth_release_queues(pi);
	t4_free_vi(adapter, adapter->mbox, adapter->pf, 0, pi->viid);
	pi->viid = 0;

	/* Free up the adapter-wide resources only after all the ports
	 * under this PF have been closed.
	 */
	for_each_port(adapter, i) {
		temp_pi = adap2pinfo(adapter, i);
		if (temp_pi->viid)
			return 0;
	}

	cxgbe_close(adapter);
	rte_free(adapter);

	return 0;
}
368
/* Start the device.
 * It returns 0 on success.
 */
int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct rte_eth_rxmode *rx_conf = &eth_dev->data->dev_conf.rxmode;
	struct adapter *adapter = pi->adapter;
	int err = 0, i;

	CXGBE_FUNC_TRACE();

	/*
	 * If we don't have a connection to the firmware there's nothing we
	 * can do.
	 */
	if (!(adapter->flags & FW_OK)) {
		err = -ENXIO;
		goto out;
	}

	/* One-time adapter bring-up, done when the first port is started */
	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgbe_up(adapter);
		if (err < 0)
			goto out;
	}

	/* Mirror the configured scatter offload into the datapath flag */
	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
		eth_dev->data->scattered_rx = 1;
	else
		eth_dev->data->scattered_rx = 0;

	cxgbe_enable_rx_queues(pi);

	err = cxgbe_setup_rss(pi);
	if (err)
		goto out;

	/* Start all configured Tx queues ... */
	for (i = 0; i < pi->n_tx_qsets; i++) {
		err = cxgbe_dev_tx_queue_start(eth_dev, i);
		if (err)
			goto out;
	}

	/* ... then all configured Rx queues */
	for (i = 0; i < pi->n_rx_qsets; i++) {
		err = cxgbe_dev_rx_queue_start(eth_dev, i);
		if (err)
			goto out;
	}

	/* Bring the link up last */
	err = cxgbe_link_start(pi);
	if (err)
		goto out;

out:
	return err;
}
426
427 /*
428  * Stop device: disable rx and tx functions to allow for reconfiguring.
429  */
430 int cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
431 {
432         struct port_info *pi = eth_dev->data->dev_private;
433         struct adapter *adapter = pi->adapter;
434
435         CXGBE_FUNC_TRACE();
436
437         if (!(adapter->flags & FULL_INIT_DONE))
438                 return 0;
439
440         cxgbe_down(pi);
441
442         /*
443          *  We clear queues only if both tx and rx path of the port
444          *  have been disabled
445          */
446         t4_sge_eth_clear_queues(pi);
447         eth_dev->data->scattered_rx = 0;
448
449         return 0;
450 }
451
452 int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
453 {
454         struct port_info *pi = eth_dev->data->dev_private;
455         struct adapter *adapter = pi->adapter;
456         int err;
457
458         CXGBE_FUNC_TRACE();
459
460         if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
461                 eth_dev->data->dev_conf.rxmode.offloads |=
462                         DEV_RX_OFFLOAD_RSS_HASH;
463
464         if (!(adapter->flags & FW_QUEUE_BOUND)) {
465                 err = cxgbe_setup_sge_fwevtq(adapter);
466                 if (err)
467                         return err;
468                 adapter->flags |= FW_QUEUE_BOUND;
469                 if (is_pf4(adapter)) {
470                         err = cxgbe_setup_sge_ctrl_txq(adapter);
471                         if (err)
472                                 return err;
473                 }
474         }
475
476         err = cxgbe_cfg_queue_count(eth_dev);
477         if (err)
478                 return err;
479
480         return 0;
481 }
482
483 int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
484 {
485         int ret;
486         struct sge_eth_txq *txq = (struct sge_eth_txq *)
487                                   (eth_dev->data->tx_queues[tx_queue_id]);
488
489         dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);
490
491         ret = t4_sge_eth_txq_start(txq);
492         if (ret == 0)
493                 eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
494
495         return ret;
496 }
497
498 int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
499 {
500         int ret;
501         struct sge_eth_txq *txq = (struct sge_eth_txq *)
502                                   (eth_dev->data->tx_queues[tx_queue_id]);
503
504         dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);
505
506         ret = t4_sge_eth_txq_stop(txq);
507         if (ret == 0)
508                 eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
509
510         return ret;
511 }
512
/*
 * Set up a Tx queue: bind the ethdev queue slot to the adapter's
 * pre-allocated queue-set entry and allocate the hardware ring.
 */
int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
			     uint16_t queue_idx, uint16_t nb_desc,
			     unsigned int socket_id,
			     const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	unsigned int temp_nb_desc;
	struct sge_eth_txq *txq;
	int err = 0;

	txq = &s->ethtxq[pi->first_txqset + queue_idx];
	dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
		  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
		  socket_id, pi->first_txqset);

	/*  Free up the existing queue  */
	if (eth_dev->data->tx_queues[queue_idx]) {
		cxgbe_dev_tx_queue_release(eth_dev, queue_idx);
		eth_dev->data->tx_queues[queue_idx] = NULL;
	}

	eth_dev->data->tx_queues[queue_idx] = (void *)txq;

	/* Sanity Checking
	 *
	 * nb_desc must lie within [CXGBE_MIN_RING_DESC_SIZE,
	 * CXGBE_MAX_RING_DESC_SIZE]: too-small requests fall back to the
	 * default ring size, too-large requests are rejected.
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_TX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_TX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_TX_DESC_SIZE);
		return -(EINVAL);
	}

	txq->q.size = temp_nb_desc;

	err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
				   s->fw_evtq.cntxt_id, socket_id);

	dev_debug(adapter, "%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\n",
		  __func__, txq->q.cntxt_id, txq->q.abs_id, err);
	return err;
}
564
565 void cxgbe_dev_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
566 {
567         struct sge_eth_txq *txq = eth_dev->data->tx_queues[qid];
568
569         if (txq) {
570                 struct port_info *pi = (struct port_info *)
571                                        (txq->eth_dev->data->dev_private);
572                 struct adapter *adap = pi->adapter;
573
574                 dev_debug(adapter, "%s: pi->port_id = %d; tx_queue_id = %d\n",
575                           __func__, pi->port_id, txq->q.cntxt_id);
576
577                 t4_sge_eth_txq_release(adap, txq);
578         }
579 }
580
581 int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
582 {
583         struct port_info *pi = eth_dev->data->dev_private;
584         struct adapter *adap = pi->adapter;
585         struct sge_eth_rxq *rxq;
586         int ret;
587
588         dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
589                   __func__, pi->port_id, rx_queue_id);
590
591         rxq = eth_dev->data->rx_queues[rx_queue_id];
592         ret = t4_sge_eth_rxq_start(adap, rxq);
593         if (ret == 0)
594                 eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
595
596         return ret;
597 }
598
599 int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
600 {
601         struct port_info *pi = eth_dev->data->dev_private;
602         struct adapter *adap = pi->adapter;
603         struct sge_eth_rxq *rxq;
604         int ret;
605
606         dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
607                   __func__, pi->port_id, rx_queue_id);
608
609         rxq = eth_dev->data->rx_queues[rx_queue_id];
610         ret = t4_sge_eth_rxq_stop(adap, rxq);
611         if (ret == 0)
612                 eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
613
614         return ret;
615 }
616
617 int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
618                              uint16_t queue_idx, uint16_t nb_desc,
619                              unsigned int socket_id,
620                              const struct rte_eth_rxconf *rx_conf __rte_unused,
621                              struct rte_mempool *mp)
622 {
623         unsigned int pkt_len = eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
624                 RTE_ETHER_CRC_LEN;
625         struct port_info *pi = eth_dev->data->dev_private;
626         struct adapter *adapter = pi->adapter;
627         struct rte_eth_dev_info dev_info;
628         struct sge *s = &adapter->sge;
629         unsigned int temp_nb_desc;
630         int err = 0, msi_idx = 0;
631         struct sge_eth_rxq *rxq;
632
633         rxq = &s->ethrxq[pi->first_rxqset + queue_idx];
634         dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
635                   __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
636                   socket_id, mp);
637
638         err = cxgbe_dev_info_get(eth_dev, &dev_info);
639         if (err != 0) {
640                 dev_err(adap, "%s: error during getting ethernet device info",
641                         __func__);
642                 return err;
643         }
644
645         /* Must accommodate at least RTE_ETHER_MIN_MTU */
646         if ((pkt_len < dev_info.min_rx_bufsize) ||
647             (pkt_len > dev_info.max_rx_pktlen)) {
648                 dev_err(adap, "%s: max pkt len must be > %d and <= %d\n",
649                         __func__, dev_info.min_rx_bufsize,
650                         dev_info.max_rx_pktlen);
651                 return -EINVAL;
652         }
653
654         /*  Free up the existing queue  */
655         if (eth_dev->data->rx_queues[queue_idx]) {
656                 cxgbe_dev_rx_queue_release(eth_dev, queue_idx);
657                 eth_dev->data->rx_queues[queue_idx] = NULL;
658         }
659
660         eth_dev->data->rx_queues[queue_idx] = (void *)rxq;
661
662         /* Sanity Checking
663          *
664          * nb_desc should be > 0 and <= CXGBE_MAX_RING_DESC_SIZE
665          */
666         temp_nb_desc = nb_desc;
667         if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
668                 dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
669                          __func__, CXGBE_MIN_RING_DESC_SIZE,
670                          CXGBE_DEFAULT_RX_DESC_SIZE);
671                 temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
672         } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
673                 dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
674                         __func__, CXGBE_MIN_RING_DESC_SIZE,
675                         CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE);
676                 return -(EINVAL);
677         }
678
679         rxq->rspq.size = temp_nb_desc;
680         rxq->fl.size = temp_nb_desc;
681
682         /* Set to jumbo mode if necessary */
683         if (eth_dev->data->mtu > RTE_ETHER_MTU)
684                 eth_dev->data->dev_conf.rxmode.offloads |=
685                         DEV_RX_OFFLOAD_JUMBO_FRAME;
686         else
687                 eth_dev->data->dev_conf.rxmode.offloads &=
688                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
689
690         err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
691                                &rxq->fl, NULL,
692                                is_pf4(adapter) ?
693                                t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp,
694                                queue_idx, socket_id);
695
696         dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\n",
697                   __func__, err, pi->port_id, rxq->rspq.cntxt_id,
698                   rxq->rspq.abs_id);
699         return err;
700 }
701
702 void cxgbe_dev_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
703 {
704         struct sge_eth_rxq *rxq = eth_dev->data->rx_queues[qid];
705
706         if (rxq) {
707                 struct port_info *pi = (struct port_info *)
708                                        (rxq->rspq.eth_dev->data->dev_private);
709                 struct adapter *adap = pi->adapter;
710
711                 dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
712                           __func__, pi->port_id, rxq->rspq.cntxt_id);
713
714                 t4_sge_eth_rxq_release(adap, rxq);
715         }
716 }
717
718 /*
719  * Get port statistics.
720  */
721 static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
722                                 struct rte_eth_stats *eth_stats)
723 {
724         struct port_info *pi = eth_dev->data->dev_private;
725         struct adapter *adapter = pi->adapter;
726         struct sge *s = &adapter->sge;
727         struct port_stats ps;
728         unsigned int i;
729
730         cxgbe_stats_get(pi, &ps);
731
732         /* RX Stats */
733         eth_stats->imissed  = ps.rx_ovflow0 + ps.rx_ovflow1 +
734                               ps.rx_ovflow2 + ps.rx_ovflow3 +
735                               ps.rx_trunc0 + ps.rx_trunc1 +
736                               ps.rx_trunc2 + ps.rx_trunc3;
737         eth_stats->ierrors  = ps.rx_symbol_err + ps.rx_fcs_err +
738                               ps.rx_jabber + ps.rx_too_long + ps.rx_runt +
739                               ps.rx_len_err;
740
741         /* TX Stats */
742         eth_stats->opackets = ps.tx_frames;
743         eth_stats->obytes   = ps.tx_octets;
744         eth_stats->oerrors  = ps.tx_error_frames;
745
746         for (i = 0; i < pi->n_rx_qsets; i++) {
747                 struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_rxqset + i];
748
749                 eth_stats->ipackets += rxq->stats.pkts;
750                 eth_stats->ibytes += rxq->stats.rx_bytes;
751         }
752
753         return 0;
754 }
755
756 /*
757  * Reset port statistics.
758  */
759 static int cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
760 {
761         struct port_info *pi = eth_dev->data->dev_private;
762         struct adapter *adapter = pi->adapter;
763         struct sge *s = &adapter->sge;
764         unsigned int i;
765
766         cxgbe_stats_reset(pi);
767         for (i = 0; i < pi->n_rx_qsets; i++) {
768                 struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_rxqset + i];
769
770                 memset(&rxq->stats, 0, sizeof(rxq->stats));
771         }
772         for (i = 0; i < pi->n_tx_qsets; i++) {
773                 struct sge_eth_txq *txq = &s->ethtxq[pi->first_txqset + i];
774
775                 memset(&txq->stats, 0, sizeof(txq->stats));
776         }
777
778         return 0;
779 }
780
/* Store extended statistics names and its offset in stats structure  */
struct cxgbe_dev_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];	/* xstat name reported to ethdev */
	unsigned int offset;	/* byte offset of the counter within its stats struct */
};
786
/* Per-Rx-queue xstats: name and byte offset into struct sge_eth_rx_stats */
static const struct cxgbe_dev_xstats_name_off cxgbe_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct sge_eth_rx_stats, pkts)},
	{"bytes", offsetof(struct sge_eth_rx_stats, rx_bytes)},
	{"checksum_offloads", offsetof(struct sge_eth_rx_stats, rx_cso)},
	{"vlan_extractions", offsetof(struct sge_eth_rx_stats, vlan_ex)},
	{"dropped_packets", offsetof(struct sge_eth_rx_stats, rx_drops)},
};
794
/* Per-Tx-queue xstats: name and byte offset into struct sge_eth_tx_stats */
static const struct cxgbe_dev_xstats_name_off cxgbe_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct sge_eth_tx_stats, pkts)},
	{"bytes", offsetof(struct sge_eth_tx_stats, tx_bytes)},
	{"tso_requests", offsetof(struct sge_eth_tx_stats, tso)},
	{"checksum_offloads", offsetof(struct sge_eth_tx_stats, tx_cso)},
	{"vlan_insertions", offsetof(struct sge_eth_tx_stats, vlan_ins)},
	{"packet_mapping_errors",
	 offsetof(struct sge_eth_tx_stats, mapping_err)},
	{"coalesced_wrs", offsetof(struct sge_eth_tx_stats, coal_wr)},
	{"coalesced_packets", offsetof(struct sge_eth_tx_stats, coal_pkts)},
};
806
807 static const struct cxgbe_dev_xstats_name_off cxgbe_dev_port_stats_strings[] = {
808         {"tx_bytes", offsetof(struct port_stats, tx_octets)},
809         {"tx_packets", offsetof(struct port_stats, tx_frames)},
810         {"tx_broadcast_packets", offsetof(struct port_stats, tx_bcast_frames)},
811         {"tx_multicast_packets", offsetof(struct port_stats, tx_mcast_frames)},
812         {"tx_unicast_packets", offsetof(struct port_stats, tx_ucast_frames)},
813         {"tx_error_packets", offsetof(struct port_stats, tx_error_frames)},
814         {"tx_size_64_packets", offsetof(struct port_stats, tx_frames_64)},
815         {"tx_size_65_to_127_packets",
816          offsetof(struct port_stats, tx_frames_65_127)},
817         {"tx_size_128_to_255_packets",
818          offsetof(struct port_stats, tx_frames_128_255)},
819         {"tx_size_256_to_511_packets",
820          offsetof(struct port_stats, tx_frames_256_511)},
821         {"tx_size_512_to_1023_packets",
822          offsetof(struct port_stats, tx_frames_512_1023)},
823         {"tx_size_1024_to_1518_packets",
824          offsetof(struct port_stats, tx_frames_1024_1518)},
825         {"tx_size_1519_to_max_packets",
826          offsetof(struct port_stats, tx_frames_1519_max)},
827         {"tx_drop_packets", offsetof(struct port_stats, tx_drop)},
828         {"tx_pause_frames", offsetof(struct port_stats, tx_pause)},
829         {"tx_ppp_pri0_packets", offsetof(struct port_stats, tx_ppp0)},
830         {"tx_ppp_pri1_packets", offsetof(struct port_stats, tx_ppp1)},
831         {"tx_ppp_pri2_packets", offsetof(struct port_stats, tx_ppp2)},
832         {"tx_ppp_pri3_packets", offsetof(struct port_stats, tx_ppp3)},
833         {"tx_ppp_pri4_packets", offsetof(struct port_stats, tx_ppp4)},
834         {"tx_ppp_pri5_packets", offsetof(struct port_stats, tx_ppp5)},
835         {"tx_ppp_pri6_packets", offsetof(struct port_stats, tx_ppp6)},
836         {"tx_ppp_pri7_packets", offsetof(struct port_stats, tx_ppp7)},
837         {"rx_bytes", offsetof(struct port_stats, rx_octets)},
838         {"rx_packets", offsetof(struct port_stats, rx_frames)},
839         {"rx_broadcast_packets", offsetof(struct port_stats, rx_bcast_frames)},
840         {"rx_multicast_packets", offsetof(struct port_stats, rx_mcast_frames)},
841         {"rx_unicast_packets", offsetof(struct port_stats, rx_ucast_frames)},
842         {"rx_too_long_packets", offsetof(struct port_stats, rx_too_long)},
843         {"rx_jabber_packets", offsetof(struct port_stats, rx_jabber)},
844         {"rx_fcs_error_packets", offsetof(struct port_stats, rx_fcs_err)},
845         {"rx_length_error_packets", offsetof(struct port_stats, rx_len_err)},
846         {"rx_symbol_error_packets",
847          offsetof(struct port_stats, rx_symbol_err)},
848         {"rx_short_packets", offsetof(struct port_stats, rx_runt)},
849         {"rx_size_64_packets", offsetof(struct port_stats, rx_frames_64)},
850         {"rx_size_65_to_127_packets",
851          offsetof(struct port_stats, rx_frames_65_127)},
852         {"rx_size_128_to_255_packets",
853          offsetof(struct port_stats, rx_frames_128_255)},
854         {"rx_size_256_to_511_packets",
855          offsetof(struct port_stats, rx_frames_256_511)},
856         {"rx_size_512_to_1023_packets",
857          offsetof(struct port_stats, rx_frames_512_1023)},
858         {"rx_size_1024_to_1518_packets",
859          offsetof(struct port_stats, rx_frames_1024_1518)},
860         {"rx_size_1519_to_max_packets",
861          offsetof(struct port_stats, rx_frames_1519_max)},
862         {"rx_pause_packets", offsetof(struct port_stats, rx_pause)},
863         {"rx_ppp_pri0_packets", offsetof(struct port_stats, rx_ppp0)},
864         {"rx_ppp_pri1_packets", offsetof(struct port_stats, rx_ppp1)},
865         {"rx_ppp_pri2_packets", offsetof(struct port_stats, rx_ppp2)},
866         {"rx_ppp_pri3_packets", offsetof(struct port_stats, rx_ppp3)},
867         {"rx_ppp_pri4_packets", offsetof(struct port_stats, rx_ppp4)},
868         {"rx_ppp_pri5_packets", offsetof(struct port_stats, rx_ppp5)},
869         {"rx_ppp_pri6_packets", offsetof(struct port_stats, rx_ppp6)},
870         {"rx_ppp_pri7_packets", offsetof(struct port_stats, rx_ppp7)},
871         {"rx_bg0_dropped_packets", offsetof(struct port_stats, rx_ovflow0)},
872         {"rx_bg1_dropped_packets", offsetof(struct port_stats, rx_ovflow1)},
873         {"rx_bg2_dropped_packets", offsetof(struct port_stats, rx_ovflow2)},
874         {"rx_bg3_dropped_packets", offsetof(struct port_stats, rx_ovflow3)},
875         {"rx_bg0_truncated_packets", offsetof(struct port_stats, rx_trunc0)},
876         {"rx_bg1_truncated_packets", offsetof(struct port_stats, rx_trunc1)},
877         {"rx_bg2_truncated_packets", offsetof(struct port_stats, rx_trunc2)},
878         {"rx_bg3_truncated_packets", offsetof(struct port_stats, rx_trunc3)},
879 };
880
/* xstats name => byte offset into struct port_stats, for VF ports.
 * VFs expose only the subset of counters filled in by
 * cxgbevf_stats_get(); see cxgbe_dev_xstats() for how this table
 * is consumed.
 */
static const struct cxgbe_dev_xstats_name_off
cxgbevf_dev_port_stats_strings[] = {
	{"tx_bytes", offsetof(struct port_stats, tx_octets)},
	{"tx_broadcast_packets", offsetof(struct port_stats, tx_bcast_frames)},
	{"tx_multicast_packets", offsetof(struct port_stats, tx_mcast_frames)},
	{"tx_unicast_packets", offsetof(struct port_stats, tx_ucast_frames)},
	{"tx_drop_packets", offsetof(struct port_stats, tx_drop)},
	{"rx_broadcast_packets", offsetof(struct port_stats, rx_bcast_frames)},
	{"rx_multicast_packets", offsetof(struct port_stats, rx_mcast_frames)},
	{"rx_unicast_packets", offsetof(struct port_stats, rx_ucast_frames)},
	{"rx_length_error_packets", offsetof(struct port_stats, rx_len_err)},
};
893
894 #define CXGBE_NB_RXQ_STATS RTE_DIM(cxgbe_dev_rxq_stats_strings)
895 #define CXGBE_NB_TXQ_STATS RTE_DIM(cxgbe_dev_txq_stats_strings)
896 #define CXGBE_NB_PORT_STATS RTE_DIM(cxgbe_dev_port_stats_strings)
897 #define CXGBEVF_NB_PORT_STATS RTE_DIM(cxgbevf_dev_port_stats_strings)
898
899 static u16 cxgbe_dev_xstats_count(struct port_info *pi)
900 {
901         u16 count;
902
903         count = (pi->n_tx_qsets * CXGBE_NB_TXQ_STATS) +
904                 (pi->n_rx_qsets * CXGBE_NB_RXQ_STATS);
905
906         if (is_pf4(pi->adapter) != 0)
907                 count += CXGBE_NB_PORT_STATS;
908         else
909                 count += CXGBEVF_NB_PORT_STATS;
910
911         return count;
912 }
913
/* Fill extended statistics names and/or values for a port.
 *
 * Entry order defines the xstat ids: port-level counters first
 * (PF or VF set), then per-Tx-queue counters, then per-Rx-queue
 * counters.  Either @xstats_names or @xstats may be NULL to fetch
 * only the other.  Returns the number of entries filled, or the
 * required total count when @size is too small.
 */
static int cxgbe_dev_xstats(struct rte_eth_dev *dev,
			    struct rte_eth_xstat_name *xstats_names,
			    struct rte_eth_xstat *xstats, unsigned int size)
{
	const struct cxgbe_dev_xstats_name_off *xstats_str;
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adap = pi->adapter;
	struct sge *s = &adap->sge;
	u16 count, i, qid, nstats;
	struct port_stats ps;
	u64 *stats_ptr;

	count = cxgbe_dev_xstats_count(pi);
	if (size < count)
		return count;

	if (is_pf4(adap) != 0) {
		/* port stats for PF */
		cxgbe_stats_get(pi, &ps);
		xstats_str = cxgbe_dev_port_stats_strings;
		nstats = CXGBE_NB_PORT_STATS;
	} else {
		/* port stats for VF */
		cxgbevf_stats_get(pi, &ps);
		xstats_str = cxgbevf_dev_port_stats_strings;
		nstats = CXGBEVF_NB_PORT_STATS;
	}

	count = 0;
	for (i = 0; i < nstats; i++, count++) {
		if (xstats_names != NULL)
			snprintf(xstats_names[count].name,
				 sizeof(xstats_names[count].name),
				 "%s", xstats_str[i].name);
		if (xstats != NULL) {
			/* Each table entry stores the counter's byte
			 * offset within struct port_stats.
			 */
			stats_ptr = RTE_PTR_ADD(&ps,
						xstats_str[i].offset);
			xstats[count].value = *stats_ptr;
			xstats[count].id = count;
		}
	}

	/* per-txq stats */
	xstats_str = cxgbe_dev_txq_stats_strings;
	for (qid = 0; qid < pi->n_tx_qsets; qid++) {
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_txqset + qid];

		for (i = 0; i < CXGBE_NB_TXQ_STATS; i++, count++) {
			if (xstats_names != NULL)
				snprintf(xstats_names[count].name,
					 sizeof(xstats_names[count].name),
					 "tx_q%u_%s",
					 qid, xstats_str[i].name);
			if (xstats != NULL) {
				stats_ptr = RTE_PTR_ADD(&txq->stats,
							xstats_str[i].offset);
				xstats[count].value = *stats_ptr;
				xstats[count].id = count;
			}
		}
	}

	/* per-rxq stats */
	xstats_str = cxgbe_dev_rxq_stats_strings;
	for (qid = 0; qid < pi->n_rx_qsets; qid++) {
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_rxqset + qid];

		for (i = 0; i < CXGBE_NB_RXQ_STATS; i++, count++) {
			if (xstats_names != NULL)
				snprintf(xstats_names[count].name,
					 sizeof(xstats_names[count].name),
					 "rx_q%u_%s",
					 qid, xstats_str[i].name);
			if (xstats != NULL) {
				stats_ptr = RTE_PTR_ADD(&rxq->stats,
							xstats_str[i].offset);
				xstats[count].value = *stats_ptr;
				xstats[count].id = count;
			}
		}
	}

	return count;
}
998
999 /* Get port extended statistics by ID. */
1000 int cxgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
1001                                const uint64_t *ids, uint64_t *values,
1002                                unsigned int n)
1003 {
1004         struct port_info *pi = dev->data->dev_private;
1005         struct rte_eth_xstat *xstats_copy;
1006         u16 count, i;
1007         int ret = 0;
1008
1009         count = cxgbe_dev_xstats_count(pi);
1010         if (ids == NULL || values == NULL)
1011                 return count;
1012
1013         xstats_copy = rte_calloc(NULL, count, sizeof(*xstats_copy), 0);
1014         if (xstats_copy == NULL)
1015                 return -ENOMEM;
1016
1017         cxgbe_dev_xstats(dev, NULL, xstats_copy, count);
1018
1019         for (i = 0; i < n; i++) {
1020                 if (ids[i] >= count) {
1021                         ret = -EINVAL;
1022                         goto out_err;
1023                 }
1024                 values[i] = xstats_copy[ids[i]].value;
1025         }
1026
1027         ret = n;
1028
1029 out_err:
1030         rte_free(xstats_copy);
1031         return ret;
1032 }
1033
1034 /* Get names of port extended statistics by ID. */
1035 int cxgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1036                                             const uint64_t *ids,
1037                                             struct rte_eth_xstat_name *xnames,
1038                                             unsigned int n)
1039 {
1040         struct port_info *pi = dev->data->dev_private;
1041         struct rte_eth_xstat_name *xnames_copy;
1042         u16 count, i;
1043         int ret = 0;
1044
1045         count = cxgbe_dev_xstats_count(pi);
1046         if (ids == NULL || xnames == NULL)
1047                 return count;
1048
1049         xnames_copy = rte_calloc(NULL, count, sizeof(*xnames_copy), 0);
1050         if (xnames_copy == NULL)
1051                 return -ENOMEM;
1052
1053         cxgbe_dev_xstats(dev, xnames_copy, NULL, count);
1054
1055         for (i = 0; i < n; i++) {
1056                 if (ids[i] >= count) {
1057                         ret = -EINVAL;
1058                         goto out_err;
1059                 }
1060                 rte_strlcpy(xnames[i].name, xnames_copy[ids[i]].name,
1061                             sizeof(xnames[i].name));
1062         }
1063
1064         ret = n;
1065
1066 out_err:
1067         rte_free(xnames_copy);
1068         return ret;
1069 }
1070
/* Get port extended statistics.
 * Values only (names NULL); returns the required count if @n is
 * too small.
 */
int cxgbe_dev_xstats_get(struct rte_eth_dev *dev,
			 struct rte_eth_xstat *xstats, unsigned int n)
{
	return cxgbe_dev_xstats(dev, NULL, xstats, n);
}
1077
/* Get names of port extended statistics.
 * Names only (values NULL); returns the required count if @n is
 * too small.
 */
int cxgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
			       struct rte_eth_xstat_name *xstats_names,
			       unsigned int n)
{
	return cxgbe_dev_xstats(dev, xstats_names, NULL, n);
}
1085
/* Reset port extended statistics.
 * xstats are built from the same counters as the basic stats, so
 * this simply delegates to the basic stats reset.
 */
static int cxgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	return cxgbe_dev_stats_reset(dev);
}
1091
1092 static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
1093                                struct rte_eth_fc_conf *fc_conf)
1094 {
1095         struct port_info *pi = eth_dev->data->dev_private;
1096         struct link_config *lc = &pi->link_cfg;
1097         u8 rx_pause = 0, tx_pause = 0;
1098         u32 caps = lc->link_caps;
1099
1100         if (caps & FW_PORT_CAP32_ANEG)
1101                 fc_conf->autoneg = 1;
1102
1103         if (caps & FW_PORT_CAP32_FC_TX)
1104                 tx_pause = 1;
1105
1106         if (caps & FW_PORT_CAP32_FC_RX)
1107                 rx_pause = 1;
1108
1109         if (rx_pause && tx_pause)
1110                 fc_conf->mode = RTE_FC_FULL;
1111         else if (rx_pause)
1112                 fc_conf->mode = RTE_FC_RX_PAUSE;
1113         else if (tx_pause)
1114                 fc_conf->mode = RTE_FC_TX_PAUSE;
1115         else
1116                 fc_conf->mode = RTE_FC_NONE;
1117         return 0;
1118 }
1119
1120 static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
1121                                struct rte_eth_fc_conf *fc_conf)
1122 {
1123         struct port_info *pi = eth_dev->data->dev_private;
1124         struct link_config *lc = &pi->link_cfg;
1125         u32 new_caps = lc->admin_caps;
1126         u8 tx_pause = 0, rx_pause = 0;
1127         int ret;
1128
1129         if (fc_conf->mode == RTE_FC_FULL) {
1130                 tx_pause = 1;
1131                 rx_pause = 1;
1132         } else if (fc_conf->mode == RTE_FC_TX_PAUSE) {
1133                 tx_pause = 1;
1134         } else if (fc_conf->mode == RTE_FC_RX_PAUSE) {
1135                 rx_pause = 1;
1136         }
1137
1138         ret = t4_set_link_pause(pi, fc_conf->autoneg, tx_pause,
1139                                 rx_pause, &new_caps);
1140         if (ret != 0)
1141                 return ret;
1142
1143         if (!fc_conf->autoneg) {
1144                 if (lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)
1145                         new_caps |= FW_PORT_CAP32_FORCE_PAUSE;
1146         } else {
1147                 new_caps &= ~FW_PORT_CAP32_FORCE_PAUSE;
1148         }
1149
1150         if (new_caps != lc->admin_caps) {
1151                 ret = t4_link_l1cfg(pi, new_caps);
1152                 if (ret == 0)
1153                         lc->admin_caps = new_caps;
1154         }
1155
1156         return ret;
1157 }
1158
1159 const uint32_t *
1160 cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
1161 {
1162         static const uint32_t ptypes[] = {
1163                 RTE_PTYPE_L3_IPV4,
1164                 RTE_PTYPE_L3_IPV6,
1165                 RTE_PTYPE_UNKNOWN
1166         };
1167
1168         if (eth_dev->rx_pkt_burst == cxgbe_recv_pkts)
1169                 return ptypes;
1170         return NULL;
1171 }
1172
1173 /* Update RSS hash configuration
1174  */
1175 static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
1176                                      struct rte_eth_rss_conf *rss_conf)
1177 {
1178         struct port_info *pi = dev->data->dev_private;
1179         struct adapter *adapter = pi->adapter;
1180         int err;
1181
1182         err = cxgbe_write_rss_conf(pi, rss_conf->rss_hf);
1183         if (err)
1184                 return err;
1185
1186         pi->rss_hf = rss_conf->rss_hf;
1187
1188         if (rss_conf->rss_key) {
1189                 u32 key[10], mod_key[10];
1190                 int i, j;
1191
1192                 memcpy(key, rss_conf->rss_key, CXGBE_DEFAULT_RSS_KEY_LEN);
1193
1194                 for (i = 9, j = 0; i >= 0; i--, j++)
1195                         mod_key[j] = cpu_to_be32(key[i]);
1196
1197                 t4_write_rss_key(adapter, mod_key, -1);
1198         }
1199
1200         return 0;
1201 }
1202
/* Get RSS hash configuration.
 * Reads the VI RSS flags from firmware, translates them to the
 * ETH_RSS_* hash-field bits, and optionally returns the RSS key
 * (converted back from hardware word order) when
 * rss_conf->rss_key is non-NULL.
 */
static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				       struct rte_eth_rss_conf *rss_conf)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u64 rss_hf = 0;
	u64 flags = 0;
	int err;

	err = t4_read_config_vi_rss(adapter, adapter->mbox, pi->viid,
				    &flags, NULL);

	if (err)
		return err;

	/* 4-tuple hashing covers TCP; UDP also needs the UDPEN bit */
	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) {
		rss_hf |= CXGBE_RSS_HF_TCP_IPV6_MASK;
		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
			rss_hf |= CXGBE_RSS_HF_UDP_IPV6_MASK;
	}

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
		rss_hf |= CXGBE_RSS_HF_IPV6_MASK;

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
	}

	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
		rss_hf |= CXGBE_RSS_HF_IPV4_MASK;

	rss_conf->rss_hf = rss_hf;

	if (rss_conf->rss_key) {
		u32 key[10], mod_key[10];
		int i, j;

		t4_read_rss_key(adapter, key);

		/* Undo the word reversal and endian swap done on write */
		for (i = 9, j = 0; i >= 0; i--, j++)
			mod_key[j] = be32_to_cpu(key[i]);

		memcpy(rss_conf->rss_key, mod_key, CXGBE_DEFAULT_RSS_KEY_LEN);
	}

	return 0;
}
1254
1255 static int cxgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
1256                                      struct rte_eth_rss_reta_entry64 *reta_conf,
1257                                      uint16_t reta_size)
1258 {
1259         struct port_info *pi = dev->data->dev_private;
1260         struct adapter *adapter = pi->adapter;
1261         u16 i, idx, shift, *rss;
1262         int ret;
1263
1264         if (!(adapter->flags & FULL_INIT_DONE))
1265                 return -ENOMEM;
1266
1267         if (!reta_size || reta_size > pi->rss_size)
1268                 return -EINVAL;
1269
1270         rss = rte_calloc(NULL, pi->rss_size, sizeof(u16), 0);
1271         if (!rss)
1272                 return -ENOMEM;
1273
1274         rte_memcpy(rss, pi->rss, pi->rss_size * sizeof(u16));
1275         for (i = 0; i < reta_size; i++) {
1276                 idx = i / RTE_RETA_GROUP_SIZE;
1277                 shift = i % RTE_RETA_GROUP_SIZE;
1278                 if (!(reta_conf[idx].mask & (1ULL << shift)))
1279                         continue;
1280
1281                 rss[i] = reta_conf[idx].reta[shift];
1282         }
1283
1284         ret = cxgbe_write_rss(pi, rss);
1285         if (!ret)
1286                 rte_memcpy(pi->rss, rss, pi->rss_size * sizeof(u16));
1287
1288         rte_free(rss);
1289         return ret;
1290 }
1291
1292 static int cxgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
1293                                     struct rte_eth_rss_reta_entry64 *reta_conf,
1294                                     uint16_t reta_size)
1295 {
1296         struct port_info *pi = dev->data->dev_private;
1297         struct adapter *adapter = pi->adapter;
1298         u16 i, idx, shift;
1299
1300         if (!(adapter->flags & FULL_INIT_DONE))
1301                 return -ENOMEM;
1302
1303         if (!reta_size || reta_size > pi->rss_size)
1304                 return -EINVAL;
1305
1306         for (i = 0; i < reta_size; i++) {
1307                 idx = i / RTE_RETA_GROUP_SIZE;
1308                 shift = i % RTE_RETA_GROUP_SIZE;
1309                 if (!(reta_conf[idx].mask & (1ULL << shift)))
1310                         continue;
1311
1312                 reta_conf[idx].reta[shift] = pi->rss[i];
1313         }
1314
1315         return 0;
1316 }
1317
/* Size of the EEPROM exposed through the rte_eth eeprom API. */
static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
{
	RTE_SET_USED(dev);
	return EEPROMSIZE;
}
1323
1324 /**
1325  * eeprom_ptov - translate a physical EEPROM address to virtual
1326  * @phys_addr: the physical EEPROM address
1327  * @fn: the PCI function number
1328  * @sz: size of function-specific area
1329  *
1330  * Translate a physical EEPROM address to virtual.  The first 1K is
1331  * accessed through virtual addresses starting at 31K, the rest is
1332  * accessed through virtual addresses starting at 0.
1333  *
1334  * The mapping is as follows:
1335  * [0..1K) -> [31K..32K)
1336  * [1K..1K+A) -> [31K-A..31K)
1337  * [1K+A..ES) -> [0..ES-A-1K)
1338  *
1339  * where A = @fn * @sz, and ES = EEPROM size.
1340  */
1341 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
1342 {
1343         fn *= sz;
1344         if (phys_addr < 1024)
1345                 return phys_addr + (31 << 10);
1346         if (phys_addr < 1024 + fn)
1347                 return fn + phys_addr - 1024;
1348         if (phys_addr < EEPROMSIZE)
1349                 return phys_addr - 1024 - fn;
1350         if (phys_addr < EEPROMVSIZE)
1351                 return phys_addr - 1024;
1352         return -EINVAL;
1353 }
1354
1355 /* The next two routines implement eeprom read/write from physical addresses.
1356  */
1357 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1358 {
1359         int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
1360
1361         if (vaddr >= 0)
1362                 vaddr = t4_seeprom_read(adap, vaddr, v);
1363         return vaddr < 0 ? vaddr : 0;
1364 }
1365
1366 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1367 {
1368         int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
1369
1370         if (vaddr >= 0)
1371                 vaddr = t4_seeprom_write(adap, vaddr, v);
1372         return vaddr < 0 ? vaddr : 0;
1373 }
1374
1375 #define EEPROM_MAGIC 0x38E2F10C
1376
1377 static int cxgbe_get_eeprom(struct rte_eth_dev *dev,
1378                             struct rte_dev_eeprom_info *e)
1379 {
1380         struct port_info *pi = dev->data->dev_private;
1381         struct adapter *adapter = pi->adapter;
1382         u32 i, err = 0;
1383         u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0);
1384
1385         if (!buf)
1386                 return -ENOMEM;
1387
1388         e->magic = EEPROM_MAGIC;
1389         for (i = e->offset & ~3; !err && i < e->offset + e->length; i += 4)
1390                 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
1391
1392         if (!err)
1393                 rte_memcpy(e->data, buf + e->offset, e->length);
1394         rte_free(buf);
1395         return err;
1396 }
1397
/* Write @eeprom->length bytes at @eeprom->offset to the EEPROM.
 * The write range is expanded to 32-bit word alignment; partial
 * first/last words are handled with a read-modify-write through a
 * temporary buffer.  Write protection is lifted for the duration
 * and restored afterwards.  Returns 0 on success or a negative
 * errno.
 */
static int cxgbe_set_eeprom(struct rte_eth_dev *dev,
			    struct rte_dev_eeprom_info *eeprom)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	/* Expand [offset, offset + length) to word boundaries */
	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->length + (eeprom->offset & 3) + 3) & ~3;

	/* Non-zero PFs may only write their own function-specific area */
	if (adapter->pf > 0) {
		u32 start = 1024 + adapter->pf * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->length) {
		/* RMW possibly needed for first or last words: fetch the
		 * boundary words, then overlay the caller's data.
		 */
		buf = rte_zmalloc(NULL, aligned_len, 0);
		if (!buf)
			return -ENOMEM;
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		rte_memcpy(buf + (eeprom->offset & 3), eeprom->data,
			   eeprom->length);
	} else {
		/* Already aligned: write directly from the caller's buffer */
		buf = eeprom->data;
	}

	/* Disable write protection before programming */
	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	/* Re-enable write protection */
	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != eeprom->data)
		rte_free(buf);
	return err;
}
1456
/* Number of 32-bit words in the register dump produced by
 * cxgbe_get_regs().
 */
static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	return t4_get_regs_len(adapter) / sizeof(uint32_t);
}
1464
/* Dump adapter registers into regs->data.  When regs->data is NULL,
 * only report the required length and word width.
 */
static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
			  struct rte_dev_reg_info *regs)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	/* version = chip version | (chip release << 10) | (1 << 16) */
	regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) |
		(CHELSIO_CHIP_RELEASE(adapter->params.chip) << 10) |
		(1 << 16);

	if (regs->data == NULL) {
		regs->length = cxgbe_get_regs_len(eth_dev);
		regs->width = sizeof(uint32_t);

		return 0;
	}

	t4_get_regs(adapter, regs->data, (regs->length * sizeof(uint32_t)));

	return 0;
}
1486
1487 int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
1488 {
1489         struct port_info *pi = dev->data->dev_private;
1490         int ret;
1491
1492         ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt, (u8 *)addr);
1493         if (ret < 0) {
1494                 dev_err(adapter, "failed to set mac addr; err = %d\n",
1495                         ret);
1496                 return ret;
1497         }
1498         pi->xact_addr_filt = ret;
1499         return 0;
1500 }
1501
1502 static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
1503                                            struct rte_eth_fec_capa *capa_arr)
1504 {
1505         int num = 0;
1506
1507         if (lc->pcaps & FW_PORT_CAP32_SPEED_100G) {
1508                 if (capa_arr) {
1509                         capa_arr[num].speed = ETH_SPEED_NUM_100G;
1510                         capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
1511                                              RTE_ETH_FEC_MODE_CAPA_MASK(RS);
1512                 }
1513                 num++;
1514         }
1515
1516         if (lc->pcaps & FW_PORT_CAP32_SPEED_50G) {
1517                 if (capa_arr) {
1518                         capa_arr[num].speed = ETH_SPEED_NUM_50G;
1519                         capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
1520                                              RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
1521                 }
1522                 num++;
1523         }
1524
1525         if (lc->pcaps & FW_PORT_CAP32_SPEED_25G) {
1526                 if (capa_arr) {
1527                         capa_arr[num].speed = ETH_SPEED_NUM_25G;
1528                         capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
1529                                              RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
1530                                              RTE_ETH_FEC_MODE_CAPA_MASK(RS);
1531                 }
1532                 num++;
1533         }
1534
1535         return num;
1536 }
1537
1538 static int cxgbe_fec_get_capability(struct rte_eth_dev *dev,
1539                                     struct rte_eth_fec_capa *speed_fec_capa,
1540                                     unsigned int num)
1541 {
1542         struct port_info *pi = dev->data->dev_private;
1543         struct link_config *lc = &pi->link_cfg;
1544         u8 num_entries;
1545
1546         if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
1547                 return -EOPNOTSUPP;
1548
1549         num_entries = cxgbe_fec_get_capa_speed_to_fec(lc, NULL);
1550         if (!speed_fec_capa || num < num_entries)
1551                 return num_entries;
1552
1553         return cxgbe_fec_get_capa_speed_to_fec(lc, speed_fec_capa);
1554 }
1555
1556 static int cxgbe_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
1557 {
1558         struct port_info *pi = dev->data->dev_private;
1559         struct link_config *lc = &pi->link_cfg;
1560         u32 fec_caps = 0, caps = lc->link_caps;
1561
1562         if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
1563                 return -EOPNOTSUPP;
1564
1565         if (caps & FW_PORT_CAP32_FEC_RS)
1566                 fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
1567         else if (caps & FW_PORT_CAP32_FEC_BASER_RS)
1568                 fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
1569         else
1570                 fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
1571
1572         *fec_capa = fec_caps;
1573         return 0;
1574 }
1575
/* Configure Forward Error Correction for the port.
 * @fec_capa is a bitmask of RTE_ETH_FEC_MODE_CAPA_MASK() bits.
 * AUTO leaves all mode flags clear (firmware decides); otherwise
 * the requested RS / BASE-R / no-FEC combination is requested.
 * The result is programmed through l1cfg only when it changes the
 * admin capabilities.
 */
static int cxgbe_fec_set(struct rte_eth_dev *dev, uint32_t fec_capa)
{
	struct port_info *pi = dev->data->dev_private;
	u8 fec_rs = 0, fec_baser = 0, fec_none = 0;
	struct link_config *lc = &pi->link_cfg;
	u32 new_caps = lc->admin_caps;
	int ret;

	if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
		return -EOPNOTSUPP;

	if (!fec_capa)
		return -EINVAL;

	/* AUTO: skip mode selection so all flags stay 0 */
	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(AUTO))
		goto set_fec;

	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC))
		fec_none = 1;

	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(BASER))
		fec_baser = 1;

	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(RS))
		fec_rs = 1;

set_fec:
	ret = t4_set_link_fec(pi, fec_rs, fec_baser, fec_none, &new_caps);
	if (ret != 0)
		return ret;

	/* Force the FEC selection when the hardware supports forcing */
	if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC)
		new_caps |= FW_PORT_CAP32_FORCE_FEC;
	else
		new_caps &= ~FW_PORT_CAP32_FORCE_FEC;

	if (new_caps != lc->admin_caps) {
		ret = t4_link_l1cfg(pi, new_caps);
		if (ret == 0)
			lc->admin_caps = new_caps;
	}

	return ret;
}
1620
/* Format the running firmware version as "major.minor.micro.build"
 * into @fw_version.  Returns 0 on success, -EIO when no firmware
 * version is known, or the required buffer size (including the
 * terminating NUL) when @fw_size is too small.
 */
int cxgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
			 size_t fw_size)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	int ret;

	/* fw_vers == 0 means no firmware version was read from the card */
	if (adapter->params.fw_vers == 0)
		return -EIO;

	ret = snprintf(fw_version, fw_size, "%u.%u.%u.%u",
		       G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
		       G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
		       G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
		       G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));
	if (ret < 0)
		return -EINVAL;

	/* Per the ethdev contract, report the size needed (snprintf
	 * return + NUL) when the caller's buffer was too small.
	 */
	ret += 1;
	if (fw_size < (size_t)ret)
		return ret;

	return 0;
}
1645
/* Ethdev callback table for cxgbe PF ports. */
static const struct eth_dev_ops cxgbe_eth_dev_ops = {
	/* Device lifecycle and configuration */
	.dev_start              = cxgbe_dev_start,
	.dev_stop               = cxgbe_dev_stop,
	.dev_close              = cxgbe_dev_close,
	.promiscuous_enable     = cxgbe_dev_promiscuous_enable,
	.promiscuous_disable    = cxgbe_dev_promiscuous_disable,
	.allmulticast_enable    = cxgbe_dev_allmulticast_enable,
	.allmulticast_disable   = cxgbe_dev_allmulticast_disable,
	.dev_configure          = cxgbe_dev_configure,
	.dev_infos_get          = cxgbe_dev_info_get,
	.dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
	/* Link management */
	.link_update            = cxgbe_dev_link_update,
	.dev_set_link_up        = cxgbe_dev_set_link_up,
	.dev_set_link_down      = cxgbe_dev_set_link_down,
	.mtu_set                = cxgbe_dev_mtu_set,
	/* Tx/Rx queue setup and control */
	.tx_queue_setup         = cxgbe_dev_tx_queue_setup,
	.tx_queue_start         = cxgbe_dev_tx_queue_start,
	.tx_queue_stop          = cxgbe_dev_tx_queue_stop,
	.tx_queue_release       = cxgbe_dev_tx_queue_release,
	.rx_queue_setup         = cxgbe_dev_rx_queue_setup,
	.rx_queue_start         = cxgbe_dev_rx_queue_start,
	.rx_queue_stop          = cxgbe_dev_rx_queue_stop,
	.rx_queue_release       = cxgbe_dev_rx_queue_release,
	/* rte_flow offload */
	.flow_ops_get           = cxgbe_dev_flow_ops_get,
	/* Statistics */
	.stats_get              = cxgbe_dev_stats_get,
	.stats_reset            = cxgbe_dev_stats_reset,
	.xstats_get             = cxgbe_dev_xstats_get,
	.xstats_get_by_id       = cxgbe_dev_xstats_get_by_id,
	.xstats_get_names       = cxgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = cxgbe_dev_xstats_get_names_by_id,
	.xstats_reset           = cxgbe_dev_xstats_reset,
	/* Flow control, EEPROM and register access */
	.flow_ctrl_get          = cxgbe_flow_ctrl_get,
	.flow_ctrl_set          = cxgbe_flow_ctrl_set,
	.get_eeprom_length      = cxgbe_get_eeprom_length,
	.get_eeprom             = cxgbe_get_eeprom,
	.set_eeprom             = cxgbe_set_eeprom,
	.get_reg                = cxgbe_get_regs,
	/* RSS and MAC address management */
	.rss_hash_update        = cxgbe_dev_rss_hash_update,
	.rss_hash_conf_get      = cxgbe_dev_rss_hash_conf_get,
	.mac_addr_set           = cxgbe_mac_addr_set,
	.reta_update            = cxgbe_dev_rss_reta_update,
	.reta_query             = cxgbe_dev_rss_reta_query,
	/* Forward Error Correction and firmware info */
	.fec_get_capability     = cxgbe_fec_get_capability,
	.fec_get                = cxgbe_fec_get,
	.fec_set                = cxgbe_fec_set,
	.fw_version_get         = cxgbe_fw_version_get,
};
1693
1694 /*
1695  * Initialize driver
1696  * It returns 0 on success.
1697  */
1698 static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
1699 {
1700         struct rte_pci_device *pci_dev;
1701         struct port_info *pi = eth_dev->data->dev_private;
1702         struct adapter *adapter = NULL;
1703         char name[RTE_ETH_NAME_MAX_LEN];
1704         int err = 0;
1705
1706         CXGBE_FUNC_TRACE();
1707
1708         eth_dev->dev_ops = &cxgbe_eth_dev_ops;
1709         eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
1710         eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
1711         pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1712
1713         /* for secondary processes, we attach to ethdevs allocated by primary
1714          * and do minimal initialization.
1715          */
1716         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1717                 int i;
1718
1719                 for (i = 1; i < MAX_NPORTS; i++) {
1720                         struct rte_eth_dev *rest_eth_dev;
1721                         char namei[RTE_ETH_NAME_MAX_LEN];
1722
1723                         snprintf(namei, sizeof(namei), "%s_%d",
1724                                  pci_dev->device.name, i);
1725                         rest_eth_dev = rte_eth_dev_attach_secondary(namei);
1726                         if (rest_eth_dev) {
1727                                 rest_eth_dev->device = &pci_dev->device;
1728                                 rest_eth_dev->dev_ops =
1729                                         eth_dev->dev_ops;
1730                                 rest_eth_dev->rx_pkt_burst =
1731                                         eth_dev->rx_pkt_burst;
1732                                 rest_eth_dev->tx_pkt_burst =
1733                                         eth_dev->tx_pkt_burst;
1734                                 rte_eth_dev_probing_finish(rest_eth_dev);
1735                         }
1736                 }
1737                 return 0;
1738         }
1739
1740         snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
1741         adapter = rte_zmalloc(name, sizeof(*adapter), 0);
1742         if (!adapter)
1743                 return -1;
1744
1745         adapter->use_unpacked_mode = 1;
1746         adapter->regs = (void *)pci_dev->mem_resource[0].addr;
1747         if (!adapter->regs) {
1748                 dev_err(adapter, "%s: cannot map device registers\n", __func__);
1749                 err = -ENOMEM;
1750                 goto out_free_adapter;
1751         }
1752         adapter->pdev = pci_dev;
1753         adapter->eth_dev = eth_dev;
1754         pi->adapter = adapter;
1755
1756         cxgbe_process_devargs(adapter);
1757
1758         err = cxgbe_probe(adapter);
1759         if (err) {
1760                 dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
1761                         __func__, err);
1762                 goto out_free_adapter;
1763         }
1764
1765         return 0;
1766
1767 out_free_adapter:
1768         rte_free(adapter);
1769         return err;
1770 }
1771
1772 static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev)
1773 {
1774         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1775         uint16_t port_id;
1776         int err = 0;
1777
1778         /* Free up other ports and all resources */
1779         RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
1780                 err |= rte_eth_dev_close(port_id);
1781
1782         return err == 0 ? 0 : -EIO;
1783 }
1784
1785 static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1786         struct rte_pci_device *pci_dev)
1787 {
1788         return rte_eth_dev_pci_generic_probe(pci_dev,
1789                 sizeof(struct port_info), eth_cxgbe_dev_init);
1790 }
1791
1792 static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
1793 {
1794         return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbe_dev_uninit);
1795 }
1796
/* PCI driver definition: matches Chelsio T4/T5/T6 PF devices from
 * cxgb4_pci_tbl and requires BAR mapping for register access.
 */
static struct rte_pci_driver rte_cxgbe_pmd = {
	.id_table = cxgb4_pci_tbl,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_cxgbe_pci_probe,
	.remove = eth_cxgbe_pci_remove,
};
1803
/* Register the PMD with the EAL under the name "net_cxgbe", export its
 * PCI ID table, the kernel modules it can bind through, the devargs it
 * accepts, and its two log types (general and mailbox).
 */
RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
			      CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
			      CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> "
			      CXGBE_DEVARG_PF_FILTER_MODE "=<uint32> "
			      CXGBE_DEVARG_PF_FILTER_MASK "=<uint32> ");
RTE_LOG_REGISTER_DEFAULT(cxgbe_logtype, NOTICE);
RTE_LOG_REGISTER_SUFFIX(cxgbe_mbox_logtype, mbox, NOTICE);