/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Chelsio Communications.
 * All rights reserved.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>

#include "cxgbe.h"
#include "cxgbe_pfvf.h"
#include "cxgbe_flow.h"

/*
 * Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
        static const struct rte_pci_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

#define PCI_VENDOR_ID_CHELSIO 0x1425

#define CH_PCI_ID_TABLE_ENTRY(devid) \
                { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
                { .vendor_id = 0, } \
        }

/*
 * ... and the PCI ID Table itself ...
 */
#include "base/t4_pci_id_tbl.h"

uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                         uint16_t nb_pkts)
{
        struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
        uint16_t pkts_sent, pkts_remain;
        uint16_t total_sent = 0;
        uint16_t idx = 0;
        int ret = 0;

        t4_os_lock(&txq->txq_lock);
        /* free up desc from already completed tx */
        reclaim_completed_tx(&txq->q);
        if (unlikely(!nb_pkts))
                goto out_unlock;

        rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[0], volatile void *));
        while (total_sent < nb_pkts) {
                pkts_remain = nb_pkts - total_sent;

                for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
                        idx = total_sent + pkts_sent;
                        /* Prefetch the next mbuf's data to hide memory
                         * latency on the following iteration.
                         */
                        if ((idx + 1) < nb_pkts)
                                rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[idx + 1],
                                                        volatile void *));
                        ret = t4_eth_xmit(txq, tx_pkts[idx], nb_pkts);
                        if (ret < 0)
                                break;
                }
                if (!pkts_sent)
                        break;
                total_sent += pkts_sent;
                /* reclaim as much as possible */
                reclaim_completed_tx(&txq->q);
        }

out_unlock:
        t4_os_unlock(&txq->txq_lock);
        return total_sent;
}

uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                         uint16_t nb_pkts)
{
        struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
        unsigned int work_done;

        if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
                dev_err(adapter, "error in cxgbe poll\n");

        return work_done;
}
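
/*
 * Usage sketch (editor's illustration, assuming a configured and started
 * port "port_id" with one queue pair): applications reach these burst
 * handlers through the generic ethdev API, e.g. a minimal forwarding loop:
 *
 *      struct rte_mbuf *pkts[32];
 *      uint16_t nb_rx, nb_tx;
 *
 *      nb_rx = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *      nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);
 *      while (nb_tx < nb_rx)
 *              rte_pktmbuf_free(pkts[nb_tx++]);
 */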

int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
                        struct rte_eth_dev_info *device_info)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        static const struct rte_eth_desc_lim cxgbe_desc_lim = {
                .nb_max = CXGBE_MAX_RING_DESC_SIZE,
                .nb_min = CXGBE_MIN_RING_DESC_SIZE,
                .nb_align = 1,
        };

        device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
        device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
        device_info->max_rx_queues = adapter->sge.max_ethqsets;
        device_info->max_tx_queues = adapter->sge.max_ethqsets;
        device_info->max_mac_addrs = 1;
        /* XXX: For now we support one MAC/port */
        device_info->max_vfs = adapter->params.arch.vfcount;
        device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */

        device_info->rx_queue_offload_capa = 0UL;
        device_info->rx_offload_capa = CXGBE_RX_OFFLOADS;

        device_info->tx_queue_offload_capa = 0UL;
        device_info->tx_offload_capa = CXGBE_TX_OFFLOADS;

        device_info->reta_size = pi->rss_size;
        device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
        device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL;

        device_info->rx_desc_lim = cxgbe_desc_lim;
        device_info->tx_desc_lim = cxgbe_desc_lim;
        cxgbe_get_speed_caps(pi, &device_info->speed_capa);

        return 0;
}

int cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        int ret;

        if (adapter->params.rawf_size != 0) {
                ret = cxgbe_mpstcam_rawf_enable(pi);
                if (ret < 0)
                        return ret;
        }

        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
                             1, -1, 1, -1, false);
}

int cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        int ret;

        if (adapter->params.rawf_size != 0) {
                ret = cxgbe_mpstcam_rawf_disable(pi);
                if (ret < 0)
                        return ret;
        }

        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
                             0, -1, 1, -1, false);
}

int cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        /* TODO: address filters ?? */

        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
                             -1, 1, 1, -1, false);
}

int cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        /* TODO: address filters ?? */

        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
                             -1, 0, 1, -1, false);
}

int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
                          int wait_to_complete)
{
        struct port_info *pi = eth_dev->data->dev_private;
        unsigned int i, work_done, budget = 32;
        struct link_config *lc = &pi->link_cfg;
        struct adapter *adapter = pi->adapter;
        struct rte_eth_link new_link = { 0 };
        u8 old_link = pi->link_cfg.link_ok;
        struct sge *s = &adapter->sge;

        for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
                if (!s->fw_evtq.desc)
                        break;

                cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

                /* Exit if link status changed or always forced up */
                if (pi->link_cfg.link_ok != old_link ||
                    cxgbe_force_linkup(adapter))
                        break;

                if (!wait_to_complete)
                        break;

                rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
        }

        new_link.link_status = cxgbe_force_linkup(adapter) ?
                               ETH_LINK_UP : pi->link_cfg.link_ok;
        new_link.link_autoneg = (lc->link_caps & FW_PORT_CAP32_ANEG) ? 1 : 0;
        new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        new_link.link_speed = t4_fwcap_to_speed(lc->link_caps);

        return rte_eth_linkstatus_set(eth_dev, &new_link);
}

/**
 * Set device link up.
 */
int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        unsigned int work_done, budget = 32;
        struct sge *s = &adapter->sge;
        int ret;

        if (!s->fw_evtq.desc)
                return -ENOMEM;

        /* Flush all link events */
        cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

        /* If link already up, nothing to do */
        if (pi->link_cfg.link_ok)
                return 0;

        ret = cxgbe_set_link_status(pi, true);
        if (ret)
                return ret;

        cxgbe_dev_link_update(dev, 1);
        return 0;
}

/**
 * Set device link down.
 */
int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        unsigned int work_done, budget = 32;
        struct sge *s = &adapter->sge;
        int ret;

        if (!s->fw_evtq.desc)
                return -ENOMEM;

        /* Flush all link events */
        cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

        /* If link already down, nothing to do */
        if (!pi->link_cfg.link_ok)
                return 0;

        ret = cxgbe_set_link_status(pi, false);
        if (ret)
                return ret;

        cxgbe_dev_link_update(dev, 0);
        return 0;
}

int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct rte_eth_dev_info dev_info;
        int err;
        uint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

        err = cxgbe_dev_info_get(eth_dev, &dev_info);
        if (err != 0)
                return err;

        /* Must accommodate at least RTE_ETHER_MIN_MTU */
        if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
                return -EINVAL;

        /* set to jumbo mode if needed */
        if (new_mtu > CXGBE_ETH_MAX_LEN)
                eth_dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                eth_dev->data->dev_conf.rxmode.offloads &=
                        ~DEV_RX_OFFLOAD_JUMBO_FRAME;

        err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
                            -1, -1, true);
        if (!err)
                eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;

        return err;
}
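
/*
 * Worked example (editor's note): for the standard Ethernet MTU of 1500,
 * new_mtu above becomes 1500 + RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN
 * (4) = 1518 bytes on the wire; anything above CXGBE_ETH_MAX_LEN therefore
 * flips the port into jumbo mode.
 */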

/*
 * Close device.
 */
int cxgbe_dev_close(struct rte_eth_dev *eth_dev)
{
        struct port_info *temp_pi, *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        u8 i;

        CXGBE_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        if (!(adapter->flags & FULL_INIT_DONE))
                return 0;

        if (!pi->viid)
                return 0;

        cxgbe_down(pi);
        t4_sge_eth_release_queues(pi);
        t4_free_vi(adapter, adapter->mbox, adapter->pf, 0, pi->viid);
        pi->viid = 0;

        /* Free up the adapter-wide resources only after all the ports
         * under this PF have been closed.
         */
        for_each_port(adapter, i) {
                temp_pi = adap2pinfo(adapter, i);
                if (temp_pi->viid)
                        return 0;
        }

        cxgbe_close(adapter);
        rte_free(adapter);

        return 0;
}

/* Start the device.
 * It returns 0 on success.
 */
int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct rte_eth_rxmode *rx_conf = &eth_dev->data->dev_conf.rxmode;
        struct adapter *adapter = pi->adapter;
        int err = 0, i;

        CXGBE_FUNC_TRACE();

        /*
         * If we don't have a connection to the firmware there's nothing we
         * can do.
         */
        if (!(adapter->flags & FW_OK)) {
                err = -ENXIO;
                goto out;
        }

        if (!(adapter->flags & FULL_INIT_DONE)) {
                err = cxgbe_up(adapter);
                if (err < 0)
                        goto out;
        }

        if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
                eth_dev->data->scattered_rx = 1;
        else
                eth_dev->data->scattered_rx = 0;

        cxgbe_enable_rx_queues(pi);

        err = cxgbe_setup_rss(pi);
        if (err)
                goto out;

        for (i = 0; i < pi->n_tx_qsets; i++) {
                err = cxgbe_dev_tx_queue_start(eth_dev, i);
                if (err)
                        goto out;
        }

        for (i = 0; i < pi->n_rx_qsets; i++) {
                err = cxgbe_dev_rx_queue_start(eth_dev, i);
                if (err)
                        goto out;
        }

        err = cxgbe_link_start(pi);
        if (err)
                goto out;

out:
        return err;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
int cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        CXGBE_FUNC_TRACE();

        if (!(adapter->flags & FULL_INIT_DONE))
                return 0;

        cxgbe_down(pi);

        /*
         * We clear queues only if both tx and rx path of the port
         * have been disabled
         */
        t4_sge_eth_clear_queues(pi);
        eth_dev->data->scattered_rx = 0;

        return 0;
}

int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        int err;

        CXGBE_FUNC_TRACE();

        if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
                eth_dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_RSS_HASH;

        if (!(adapter->flags & FW_QUEUE_BOUND)) {
                err = cxgbe_setup_sge_fwevtq(adapter);
                if (err)
                        return err;
                adapter->flags |= FW_QUEUE_BOUND;
                if (is_pf4(adapter)) {
                        err = cxgbe_setup_sge_ctrl_txq(adapter);
                        if (err)
                                return err;
                }
        }

        err = cxgbe_cfg_queue_count(eth_dev);
        if (err)
                return err;

        return 0;
}

int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
        int ret;
        struct sge_eth_txq *txq = (struct sge_eth_txq *)
                                  (eth_dev->data->tx_queues[tx_queue_id]);

        dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

        ret = t4_sge_eth_txq_start(txq);
        if (ret == 0)
                eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

        return ret;
}

int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
        int ret;
        struct sge_eth_txq *txq = (struct sge_eth_txq *)
                                  (eth_dev->data->tx_queues[tx_queue_id]);

        dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

        ret = t4_sge_eth_txq_stop(txq);
        if (ret == 0)
                eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

        return ret;
}

int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
                             uint16_t queue_idx, uint16_t nb_desc,
                             unsigned int socket_id,
                             const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;
        unsigned int temp_nb_desc;
        struct sge_eth_txq *txq;
        int err = 0;

        txq = &s->ethtxq[pi->first_txqset + queue_idx];
        dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
                  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
                  socket_id, pi->first_txqset);

        /* Free up the existing queue */
        if (eth_dev->data->tx_queues[queue_idx]) {
                cxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]);
                eth_dev->data->tx_queues[queue_idx] = NULL;
        }

        eth_dev->data->tx_queues[queue_idx] = (void *)txq;

        /* Sanity Checking
         *
         * nb_desc must be >= CXGBE_MIN_RING_DESC_SIZE and
         * <= CXGBE_MAX_RING_DESC_SIZE
         */
        temp_nb_desc = nb_desc;
        if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
                dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
                         __func__, CXGBE_MIN_RING_DESC_SIZE,
                         CXGBE_DEFAULT_TX_DESC_SIZE);
                temp_nb_desc = CXGBE_DEFAULT_TX_DESC_SIZE;
        } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
                dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
                        __func__, CXGBE_MIN_RING_DESC_SIZE,
                        CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_TX_DESC_SIZE);
                return -EINVAL;
        }

        txq->q.size = temp_nb_desc;

        err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
                                   s->fw_evtq.cntxt_id, socket_id);

        dev_debug(adapter, "%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\n",
                  __func__, txq->q.cntxt_id, txq->q.abs_id, err);
        return err;
}

void cxgbe_dev_tx_queue_release(void *q)
{
        struct sge_eth_txq *txq = (struct sge_eth_txq *)q;

        if (txq) {
                struct port_info *pi = (struct port_info *)
                                       (txq->eth_dev->data->dev_private);
                struct adapter *adap = pi->adapter;

                dev_debug(adap, "%s: pi->port_id = %d; tx_queue_id = %d\n",
                          __func__, pi->port_id, txq->q.cntxt_id);

                t4_sge_eth_txq_release(adap, txq);
        }
}

int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adap = pi->adapter;
        struct sge_eth_rxq *rxq;
        int ret;

        dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
                  __func__, pi->port_id, rx_queue_id);

        rxq = eth_dev->data->rx_queues[rx_queue_id];
        ret = t4_sge_eth_rxq_start(adap, rxq);
        if (ret == 0)
                eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

        return ret;
}

int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adap = pi->adapter;
        struct sge_eth_rxq *rxq;
        int ret;

        dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
                  __func__, pi->port_id, rx_queue_id);

        rxq = eth_dev->data->rx_queues[rx_queue_id];
        ret = t4_sge_eth_rxq_stop(adap, rxq);
        if (ret == 0)
                eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

        return ret;
}

int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
                             uint16_t queue_idx, uint16_t nb_desc,
                             unsigned int socket_id,
                             const struct rte_eth_rxconf *rx_conf __rte_unused,
                             struct rte_mempool *mp)
{
        unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct rte_eth_dev_info dev_info;
        struct sge *s = &adapter->sge;
        unsigned int temp_nb_desc;
        int err = 0, msi_idx = 0;
        struct sge_eth_rxq *rxq;

        rxq = &s->ethrxq[pi->first_rxqset + queue_idx];
        dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
                  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
                  socket_id, mp);

        err = cxgbe_dev_info_get(eth_dev, &dev_info);
        if (err != 0) {
                dev_err(adapter, "%s: error during getting ethernet device info",
                        __func__);
                return err;
        }

        /* The configured max packet length must lie within the supported
         * Rx buffer and packet length bounds.
         */
        if ((pkt_len < dev_info.min_rx_bufsize) ||
            (pkt_len > dev_info.max_rx_pktlen)) {
                dev_err(adapter, "%s: max pkt len must be >= %d and <= %d\n",
                        __func__, dev_info.min_rx_bufsize,
                        dev_info.max_rx_pktlen);
                return -EINVAL;
        }

        /* Free up the existing queue */
        if (eth_dev->data->rx_queues[queue_idx]) {
                cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
                eth_dev->data->rx_queues[queue_idx] = NULL;
        }

        eth_dev->data->rx_queues[queue_idx] = (void *)rxq;

        /* Sanity Checking
         *
         * nb_desc must be >= CXGBE_MIN_RING_DESC_SIZE and
         * <= CXGBE_MAX_RING_DESC_SIZE
         */
        temp_nb_desc = nb_desc;
        if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
                dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
                         __func__, CXGBE_MIN_RING_DESC_SIZE,
                         CXGBE_DEFAULT_RX_DESC_SIZE);
                temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
        } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
                dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
                        __func__, CXGBE_MIN_RING_DESC_SIZE,
                        CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE);
                return -EINVAL;
        }

        rxq->rspq.size = temp_nb_desc;
        rxq->fl.size = temp_nb_desc;

        /* Set to jumbo mode if necessary */
        if (pkt_len > CXGBE_ETH_MAX_LEN)
                eth_dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                eth_dev->data->dev_conf.rxmode.offloads &=
                        ~DEV_RX_OFFLOAD_JUMBO_FRAME;

        err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
                               &rxq->fl, NULL,
                               is_pf4(adapter) ?
                               t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp,
                               queue_idx, socket_id);

        dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\n",
                  __func__, err, pi->port_id, rxq->rspq.cntxt_id,
                  rxq->rspq.abs_id);
        return err;
}

void cxgbe_dev_rx_queue_release(void *q)
{
        struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;

        if (rxq) {
                struct port_info *pi = (struct port_info *)
                                       (rxq->rspq.eth_dev->data->dev_private);
                struct adapter *adap = pi->adapter;

                dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
                          __func__, pi->port_id, rxq->rspq.cntxt_id);

                t4_sge_eth_rxq_release(adap, rxq);
        }
}

/*
 * Get port statistics.
 */
static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
                                struct rte_eth_stats *eth_stats)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;
        struct port_stats ps;
        unsigned int i;

        cxgbe_stats_get(pi, &ps);

        /* RX Stats */
        eth_stats->imissed  = ps.rx_ovflow0 + ps.rx_ovflow1 +
                              ps.rx_ovflow2 + ps.rx_ovflow3 +
                              ps.rx_trunc0 + ps.rx_trunc1 +
                              ps.rx_trunc2 + ps.rx_trunc3;
        eth_stats->ierrors  = ps.rx_symbol_err + ps.rx_fcs_err +
                              ps.rx_jabber + ps.rx_too_long + ps.rx_runt +
                              ps.rx_len_err;

        /* TX Stats */
        eth_stats->opackets = ps.tx_frames;
        eth_stats->obytes   = ps.tx_octets;
        eth_stats->oerrors  = ps.tx_error_frames;

        for (i = 0; i < pi->n_rx_qsets; i++) {
                struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_rxqset + i];

                eth_stats->ipackets += rxq->stats.pkts;
                eth_stats->ibytes += rxq->stats.rx_bytes;
        }

        return 0;
}

/*
 * Reset port statistics.
 */
static int cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;
        unsigned int i;

        cxgbe_stats_reset(pi);
        for (i = 0; i < pi->n_rx_qsets; i++) {
                struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_rxqset + i];

                memset(&rxq->stats, 0, sizeof(rxq->stats));
        }
        for (i = 0; i < pi->n_tx_qsets; i++) {
                struct sge_eth_txq *txq = &s->ethtxq[pi->first_txqset + i];

                memset(&txq->stats, 0, sizeof(txq->stats));
        }

        return 0;
}

/* Extended statistics name strings and their offsets in the stats structures */
struct cxgbe_dev_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned int offset;
};

static const struct cxgbe_dev_xstats_name_off cxgbe_dev_rxq_stats_strings[] = {
        {"packets", offsetof(struct sge_eth_rx_stats, pkts)},
        {"bytes", offsetof(struct sge_eth_rx_stats, rx_bytes)},
        {"checksum_offloads", offsetof(struct sge_eth_rx_stats, rx_cso)},
        {"vlan_extractions", offsetof(struct sge_eth_rx_stats, vlan_ex)},
        {"dropped_packets", offsetof(struct sge_eth_rx_stats, rx_drops)},
};

static const struct cxgbe_dev_xstats_name_off cxgbe_dev_txq_stats_strings[] = {
        {"packets", offsetof(struct sge_eth_tx_stats, pkts)},
        {"bytes", offsetof(struct sge_eth_tx_stats, tx_bytes)},
        {"tso_requests", offsetof(struct sge_eth_tx_stats, tso)},
        {"checksum_offloads", offsetof(struct sge_eth_tx_stats, tx_cso)},
        {"vlan_insertions", offsetof(struct sge_eth_tx_stats, vlan_ins)},
        {"packet_mapping_errors",
         offsetof(struct sge_eth_tx_stats, mapping_err)},
        {"coalesced_wrs", offsetof(struct sge_eth_tx_stats, coal_wr)},
        {"coalesced_packets", offsetof(struct sge_eth_tx_stats, coal_pkts)},
};

static const struct cxgbe_dev_xstats_name_off cxgbe_dev_port_stats_strings[] = {
        {"tx_bytes", offsetof(struct port_stats, tx_octets)},
        {"tx_packets", offsetof(struct port_stats, tx_frames)},
        {"tx_broadcast_packets", offsetof(struct port_stats, tx_bcast_frames)},
        {"tx_multicast_packets", offsetof(struct port_stats, tx_mcast_frames)},
        {"tx_unicast_packets", offsetof(struct port_stats, tx_ucast_frames)},
        {"tx_error_packets", offsetof(struct port_stats, tx_error_frames)},
        {"tx_size_64_packets", offsetof(struct port_stats, tx_frames_64)},
        {"tx_size_65_to_127_packets",
         offsetof(struct port_stats, tx_frames_65_127)},
        {"tx_size_128_to_255_packets",
         offsetof(struct port_stats, tx_frames_128_255)},
        {"tx_size_256_to_511_packets",
         offsetof(struct port_stats, tx_frames_256_511)},
        {"tx_size_512_to_1023_packets",
         offsetof(struct port_stats, tx_frames_512_1023)},
        {"tx_size_1024_to_1518_packets",
         offsetof(struct port_stats, tx_frames_1024_1518)},
        {"tx_size_1519_to_max_packets",
         offsetof(struct port_stats, tx_frames_1519_max)},
        {"tx_drop_packets", offsetof(struct port_stats, tx_drop)},
        {"tx_pause_frames", offsetof(struct port_stats, tx_pause)},
        {"tx_ppp_pri0_packets", offsetof(struct port_stats, tx_ppp0)},
        {"tx_ppp_pri1_packets", offsetof(struct port_stats, tx_ppp1)},
        {"tx_ppp_pri2_packets", offsetof(struct port_stats, tx_ppp2)},
        {"tx_ppp_pri3_packets", offsetof(struct port_stats, tx_ppp3)},
        {"tx_ppp_pri4_packets", offsetof(struct port_stats, tx_ppp4)},
        {"tx_ppp_pri5_packets", offsetof(struct port_stats, tx_ppp5)},
        {"tx_ppp_pri6_packets", offsetof(struct port_stats, tx_ppp6)},
        {"tx_ppp_pri7_packets", offsetof(struct port_stats, tx_ppp7)},
        {"rx_bytes", offsetof(struct port_stats, rx_octets)},
        {"rx_packets", offsetof(struct port_stats, rx_frames)},
        {"rx_broadcast_packets", offsetof(struct port_stats, rx_bcast_frames)},
        {"rx_multicast_packets", offsetof(struct port_stats, rx_mcast_frames)},
        {"rx_unicast_packets", offsetof(struct port_stats, rx_ucast_frames)},
        {"rx_too_long_packets", offsetof(struct port_stats, rx_too_long)},
        {"rx_jabber_packets", offsetof(struct port_stats, rx_jabber)},
        {"rx_fcs_error_packets", offsetof(struct port_stats, rx_fcs_err)},
        {"rx_length_error_packets", offsetof(struct port_stats, rx_len_err)},
        {"rx_symbol_error_packets",
         offsetof(struct port_stats, rx_symbol_err)},
        {"rx_short_packets", offsetof(struct port_stats, rx_runt)},
        {"rx_size_64_packets", offsetof(struct port_stats, rx_frames_64)},
        {"rx_size_65_to_127_packets",
         offsetof(struct port_stats, rx_frames_65_127)},
        {"rx_size_128_to_255_packets",
         offsetof(struct port_stats, rx_frames_128_255)},
        {"rx_size_256_to_511_packets",
         offsetof(struct port_stats, rx_frames_256_511)},
        {"rx_size_512_to_1023_packets",
         offsetof(struct port_stats, rx_frames_512_1023)},
        {"rx_size_1024_to_1518_packets",
         offsetof(struct port_stats, rx_frames_1024_1518)},
        {"rx_size_1519_to_max_packets",
         offsetof(struct port_stats, rx_frames_1519_max)},
        {"rx_pause_packets", offsetof(struct port_stats, rx_pause)},
        {"rx_ppp_pri0_packets", offsetof(struct port_stats, rx_ppp0)},
        {"rx_ppp_pri1_packets", offsetof(struct port_stats, rx_ppp1)},
        {"rx_ppp_pri2_packets", offsetof(struct port_stats, rx_ppp2)},
        {"rx_ppp_pri3_packets", offsetof(struct port_stats, rx_ppp3)},
        {"rx_ppp_pri4_packets", offsetof(struct port_stats, rx_ppp4)},
        {"rx_ppp_pri5_packets", offsetof(struct port_stats, rx_ppp5)},
        {"rx_ppp_pri6_packets", offsetof(struct port_stats, rx_ppp6)},
        {"rx_ppp_pri7_packets", offsetof(struct port_stats, rx_ppp7)},
        {"rx_bg0_dropped_packets", offsetof(struct port_stats, rx_ovflow0)},
        {"rx_bg1_dropped_packets", offsetof(struct port_stats, rx_ovflow1)},
        {"rx_bg2_dropped_packets", offsetof(struct port_stats, rx_ovflow2)},
        {"rx_bg3_dropped_packets", offsetof(struct port_stats, rx_ovflow3)},
        {"rx_bg0_truncated_packets", offsetof(struct port_stats, rx_trunc0)},
        {"rx_bg1_truncated_packets", offsetof(struct port_stats, rx_trunc1)},
        {"rx_bg2_truncated_packets", offsetof(struct port_stats, rx_trunc2)},
        {"rx_bg3_truncated_packets", offsetof(struct port_stats, rx_trunc3)},
};

#define CXGBE_NB_RXQ_STATS RTE_DIM(cxgbe_dev_rxq_stats_strings)
#define CXGBE_NB_TXQ_STATS RTE_DIM(cxgbe_dev_txq_stats_strings)
#define CXGBE_NB_PORT_STATS RTE_DIM(cxgbe_dev_port_stats_strings)

static u16 cxgbe_dev_xstats_count(struct port_info *pi)
{
        return CXGBE_NB_PORT_STATS +
               (pi->n_tx_qsets * CXGBE_NB_TXQ_STATS) +
               (pi->n_rx_qsets * CXGBE_NB_RXQ_STATS);
}

static int cxgbe_dev_xstats(struct rte_eth_dev *dev,
                            struct rte_eth_xstat_name *xstats_names,
                            struct rte_eth_xstat *xstats, unsigned int size)
{
        const struct cxgbe_dev_xstats_name_off *xstats_str;
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adap = pi->adapter;
        struct sge *s = &adap->sge;
        struct port_stats ps;
        u16 count, i, qid;
        u64 *stats_ptr;

        count = cxgbe_dev_xstats_count(pi);
        if (size < count)
                return count;

        /* port stats */
        cxgbe_stats_get(pi, &ps);

        count = 0;
        xstats_str = cxgbe_dev_port_stats_strings;
        for (i = 0; i < CXGBE_NB_PORT_STATS; i++, count++) {
                if (xstats_names != NULL)
                        snprintf(xstats_names[count].name,
                                 sizeof(xstats_names[count].name),
                                 "%s", xstats_str[i].name);
                if (xstats != NULL) {
                        stats_ptr = RTE_PTR_ADD(&ps,
                                                xstats_str[i].offset);
                        xstats[count].value = *stats_ptr;
                        xstats[count].id = count;
                }
        }

        /* per-txq stats */
        xstats_str = cxgbe_dev_txq_stats_strings;
        for (qid = 0; qid < pi->n_tx_qsets; qid++) {
                struct sge_eth_txq *txq = &s->ethtxq[pi->first_txqset + qid];

                for (i = 0; i < CXGBE_NB_TXQ_STATS; i++, count++) {
                        if (xstats_names != NULL)
                                snprintf(xstats_names[count].name,
                                         sizeof(xstats_names[count].name),
                                         "tx_q%u_%s",
                                         qid, xstats_str[i].name);
                        if (xstats != NULL) {
                                stats_ptr = RTE_PTR_ADD(&txq->stats,
                                                        xstats_str[i].offset);
                                xstats[count].value = *stats_ptr;
                                xstats[count].id = count;
                        }
                }
        }

        /* per-rxq stats */
        xstats_str = cxgbe_dev_rxq_stats_strings;
        for (qid = 0; qid < pi->n_rx_qsets; qid++) {
                struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_rxqset + qid];

                for (i = 0; i < CXGBE_NB_RXQ_STATS; i++, count++) {
                        if (xstats_names != NULL)
                                snprintf(xstats_names[count].name,
                                         sizeof(xstats_names[count].name),
                                         "rx_q%u_%s",
                                         qid, xstats_str[i].name);
                        if (xstats != NULL) {
                                stats_ptr = RTE_PTR_ADD(&rxq->stats,
                                                        xstats_str[i].offset);
                                xstats[count].value = *stats_ptr;
                                xstats[count].id = count;
                        }
                }
        }

        return count;
}
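
/*
 * Usage sketch (editor's illustration): an application typically sizes its
 * buffer with a first call, then fetches the values, e.g.:
 *
 *      int n = rte_eth_xstats_get(port_id, NULL, 0);
 *      struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *      rte_eth_xstats_get(port_id, xs, n);
 *
 * With the format strings above, the reported names look like "tx_bytes"
 * for port-level counters and "tx_q0_packets" / "rx_q1_bytes" for the
 * per-queue counters.
 */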

/* Get port extended statistics by ID. */
static int cxgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
                                      const uint64_t *ids, uint64_t *values,
                                      unsigned int n)
{
        struct port_info *pi = dev->data->dev_private;
        struct rte_eth_xstat *xstats_copy;
        u16 count, i;
        int ret = 0;

        count = cxgbe_dev_xstats_count(pi);
        if (ids == NULL || values == NULL)
                return count;

        xstats_copy = rte_calloc(NULL, count, sizeof(*xstats_copy), 0);
        if (xstats_copy == NULL)
                return -ENOMEM;

        cxgbe_dev_xstats(dev, NULL, xstats_copy, count);

        for (i = 0; i < n; i++) {
                if (ids[i] >= count) {
                        ret = -EINVAL;
                        goto out_err;
                }
                values[i] = xstats_copy[ids[i]].value;
        }

        ret = n;

out_err:
        rte_free(xstats_copy);
        return ret;
}

/* Get names of port extended statistics by ID. */
static int cxgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
                                            struct rte_eth_xstat_name *xnames,
                                            const uint64_t *ids, unsigned int n)
{
        struct port_info *pi = dev->data->dev_private;
        struct rte_eth_xstat_name *xnames_copy;
        u16 count, i;
        int ret = 0;

        count = cxgbe_dev_xstats_count(pi);
        if (ids == NULL || xnames == NULL)
                return count;

        xnames_copy = rte_calloc(NULL, count, sizeof(*xnames_copy), 0);
        if (xnames_copy == NULL)
                return -ENOMEM;

        cxgbe_dev_xstats(dev, xnames_copy, NULL, count);

        for (i = 0; i < n; i++) {
                if (ids[i] >= count) {
                        ret = -EINVAL;
                        goto out_err;
                }
                rte_strlcpy(xnames[i].name, xnames_copy[ids[i]].name,
                            sizeof(xnames[i].name));
        }

        ret = n;

out_err:
        rte_free(xnames_copy);
        return ret;
}

/* Get port extended statistics. */
static int cxgbe_dev_xstats_get(struct rte_eth_dev *dev,
                                struct rte_eth_xstat *xstats, unsigned int n)
{
        return cxgbe_dev_xstats(dev, NULL, xstats, n);
}

/* Get names of port extended statistics. */
static int cxgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
                                      struct rte_eth_xstat_name *xstats_names,
                                      unsigned int n)
{
        return cxgbe_dev_xstats(dev, xstats_names, NULL, n);
}

/* Reset port extended statistics. */
static int cxgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
        return cxgbe_dev_stats_reset(dev);
}

static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
                               struct rte_eth_fc_conf *fc_conf)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct link_config *lc = &pi->link_cfg;
        u8 rx_pause = 0, tx_pause = 0;
        u32 caps = lc->link_caps;

        if (caps & FW_PORT_CAP32_ANEG)
                fc_conf->autoneg = 1;

        if (caps & FW_PORT_CAP32_FC_TX)
                tx_pause = 1;

        if (caps & FW_PORT_CAP32_FC_RX)
                rx_pause = 1;

        if (rx_pause && tx_pause)
                fc_conf->mode = RTE_FC_FULL;
        else if (rx_pause)
                fc_conf->mode = RTE_FC_RX_PAUSE;
        else if (tx_pause)
                fc_conf->mode = RTE_FC_TX_PAUSE;
        else
                fc_conf->mode = RTE_FC_NONE;
        return 0;
}

static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
                               struct rte_eth_fc_conf *fc_conf)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct link_config *lc = &pi->link_cfg;
        u32 new_caps = lc->admin_caps;
        u8 tx_pause = 0, rx_pause = 0;
        int ret;

        if (fc_conf->mode == RTE_FC_FULL) {
                tx_pause = 1;
                rx_pause = 1;
        } else if (fc_conf->mode == RTE_FC_TX_PAUSE) {
                tx_pause = 1;
        } else if (fc_conf->mode == RTE_FC_RX_PAUSE) {
                rx_pause = 1;
        }

        ret = t4_set_link_pause(pi, fc_conf->autoneg, tx_pause,
                                rx_pause, &new_caps);
        if (ret != 0)
                return ret;

        if (!fc_conf->autoneg) {
                if (lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)
                        new_caps |= FW_PORT_CAP32_FORCE_PAUSE;
        } else {
                new_caps &= ~FW_PORT_CAP32_FORCE_PAUSE;
        }

        if (new_caps != lc->admin_caps) {
                ret = t4_link_l1cfg(pi, new_caps);
                if (ret == 0)
                        lc->admin_caps = new_caps;
        }

        return ret;
}

const uint32_t *
cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L3_IPV4,
                RTE_PTYPE_L3_IPV6,
                RTE_PTYPE_UNKNOWN
        };

        if (eth_dev->rx_pkt_burst == cxgbe_recv_pkts)
                return ptypes;
        return NULL;
}

/* Update RSS hash configuration */
static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
                                     struct rte_eth_rss_conf *rss_conf)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        int err;

        err = cxgbe_write_rss_conf(pi, rss_conf->rss_hf);
        if (err)
                return err;

        pi->rss_hf = rss_conf->rss_hf;

        if (rss_conf->rss_key) {
                u32 key[10], mod_key[10];
                int i, j;

                memcpy(key, rss_conf->rss_key, CXGBE_DEFAULT_RSS_KEY_LEN);

                for (i = 9, j = 0; i >= 0; i--, j++)
                        mod_key[j] = cpu_to_be32(key[i]);

                t4_write_rss_key(adapter, mod_key, -1);
        }

        return 0;
}
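
/*
 * Editor's note on the key swizzle above: the 40-byte key supplied through
 * rss_conf->rss_key is read as ten host-order u32 words key[0..9]; the loop
 * stores them in reverse word order and converts each to big-endian, so
 * mod_key[0] holds cpu_to_be32(key[9]) and so on, which appears to be the
 * layout t4_write_rss_key() expects. cxgbe_dev_rss_hash_conf_get() below
 * applies the inverse transform when reading the key back.
 */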

/* Get RSS hash configuration */
static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                                       struct rte_eth_rss_conf *rss_conf)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        u64 rss_hf = 0;
        u64 flags = 0;
        int err;

        err = t4_read_config_vi_rss(adapter, adapter->mbox, pi->viid,
                                    &flags, NULL);
        if (err)
                return err;

        if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) {
                rss_hf |= CXGBE_RSS_HF_TCP_IPV6_MASK;
                if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
                        rss_hf |= CXGBE_RSS_HF_UDP_IPV6_MASK;
        }

        if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
                rss_hf |= CXGBE_RSS_HF_IPV6_MASK;

        if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
                rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
                if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
                        rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
        }

        if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
                rss_hf |= CXGBE_RSS_HF_IPV4_MASK;

        rss_conf->rss_hf = rss_hf;

        if (rss_conf->rss_key) {
                u32 key[10], mod_key[10];
                int i, j;

                t4_read_rss_key(adapter, key);

                for (i = 9, j = 0; i >= 0; i--, j++)
                        mod_key[j] = be32_to_cpu(key[i]);

                memcpy(rss_conf->rss_key, mod_key, CXGBE_DEFAULT_RSS_KEY_LEN);
        }

        return 0;
}

static int cxgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
                                     struct rte_eth_rss_reta_entry64 *reta_conf,
                                     uint16_t reta_size)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        u16 i, idx, shift, *rss;
        int ret;

        if (!(adapter->flags & FULL_INIT_DONE))
                return -ENOMEM;

        if (!reta_size || reta_size > pi->rss_size)
                return -EINVAL;

        rss = rte_calloc(NULL, pi->rss_size, sizeof(u16), 0);
        if (!rss)
                return -ENOMEM;

        rte_memcpy(rss, pi->rss, pi->rss_size * sizeof(u16));
        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                if (!(reta_conf[idx].mask & (1ULL << shift)))
                        continue;

                rss[i] = reta_conf[idx].reta[shift];
        }

        ret = cxgbe_write_rss(pi, rss);
        if (!ret)
                rte_memcpy(pi->rss, rss, pi->rss_size * sizeof(u16));

        rte_free(rss);
        return ret;
}
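
/*
 * Usage sketch (editor's illustration, assuming a port whose rss_size is
 * at least 64): steering RETA slot 5 to queue 2 through the generic API:
 *
 *      struct rte_eth_rss_reta_entry64 reta[1];
 *
 *      memset(reta, 0, sizeof(reta));
 *      reta[5 / RTE_RETA_GROUP_SIZE].mask |=
 *              1ULL << (5 % RTE_RETA_GROUP_SIZE);
 *      reta[5 / RTE_RETA_GROUP_SIZE].reta[5 % RTE_RETA_GROUP_SIZE] = 2;
 *      rte_eth_dev_rss_reta_update(port_id, reta, 64);
 */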

static int cxgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
                                    struct rte_eth_rss_reta_entry64 *reta_conf,
                                    uint16_t reta_size)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        u16 i, idx, shift;

        if (!(adapter->flags & FULL_INIT_DONE))
                return -ENOMEM;

        if (!reta_size || reta_size > pi->rss_size)
                return -EINVAL;

        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                if (!(reta_conf[idx].mask & (1ULL << shift)))
                        continue;

                reta_conf[idx].reta[shift] = pi->rss[i];
        }

        return 0;
}

static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
{
        RTE_SET_USED(dev);
        return EEPROMSIZE;
}

/**
 * eeprom_ptov - translate a physical EEPROM address to virtual
 * @phys_addr: the physical EEPROM address
 * @fn: the PCI function number
 * @sz: size of function-specific area
 *
 * Translate a physical EEPROM address to virtual.  The first 1K is
 * accessed through virtual addresses starting at 31K, the rest is
 * accessed through virtual addresses starting at 0.
 *
 * The mapping is as follows:
 * [0..1K) -> [31K..32K)
 * [1K..1K+A) -> [31K-A..31K)
 * [1K+A..ES) -> [0..ES-A-1K)
 *
 * where A = @fn * @sz, and ES = EEPROM size.
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
        fn *= sz;
        if (phys_addr < 1024)
                return phys_addr + (31 << 10);
        if (phys_addr < 1024 + fn)
                return fn + phys_addr - 1024;
        if (phys_addr < EEPROMSIZE)
                return phys_addr - 1024 - fn;
        if (phys_addr < EEPROMVSIZE)
                return phys_addr - 1024;
        return -EINVAL;
}
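
/*
 * Worked example (editor's note, assuming fn = 0 so A = 0): physical
 * address 0 maps to virtual 31744 (31K), physical 1023 to 32767, and
 * physical 1024 to virtual 0, matching the [0..1K) -> [31K..32K) and
 * [1K+A..ES) -> [0..ES-A-1K) ranges documented above.
 */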

/* The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
        int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

        if (vaddr >= 0)
                vaddr = t4_seeprom_read(adap, vaddr, v);
        return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
        int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

        if (vaddr >= 0)
                vaddr = t4_seeprom_write(adap, vaddr, v);
        return vaddr < 0 ? vaddr : 0;
}

#define EEPROM_MAGIC 0x38E2F10C

static int cxgbe_get_eeprom(struct rte_eth_dev *dev,
                            struct rte_dev_eeprom_info *e)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        u32 i, err = 0;
        u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0);

        if (!buf)
                return -ENOMEM;

        e->magic = EEPROM_MAGIC;
        for (i = e->offset & ~3; !err && i < e->offset + e->length; i += 4)
                err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

        if (!err)
                rte_memcpy(e->data, buf + e->offset, e->length);
        rte_free(buf);
        return err;
}

static int cxgbe_set_eeprom(struct rte_eth_dev *dev,
                            struct rte_dev_eeprom_info *eeprom)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        u8 *buf;
        int err = 0;
        u32 aligned_offset, aligned_len, *p;

        if (eeprom->magic != EEPROM_MAGIC)
                return -EINVAL;

        aligned_offset = eeprom->offset & ~3;
        aligned_len = (eeprom->length + (eeprom->offset & 3) + 3) & ~3;

        if (adapter->pf > 0) {
                u32 start = 1024 + adapter->pf * EEPROMPFSIZE;

                if (aligned_offset < start ||
                    aligned_offset + aligned_len > start + EEPROMPFSIZE)
                        return -EPERM;
        }

        if (aligned_offset != eeprom->offset || aligned_len != eeprom->length) {
                /* RMW possibly needed for first or last words.
                 */
                buf = rte_zmalloc(NULL, aligned_len, 0);
                if (!buf)
                        return -ENOMEM;
                err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
                if (!err && aligned_len > 4)
                        err = eeprom_rd_phys(adapter,
                                             aligned_offset + aligned_len - 4,
                                             (u32 *)&buf[aligned_len - 4]);
                if (err)
                        goto out;
                rte_memcpy(buf + (eeprom->offset & 3), eeprom->data,
                           eeprom->length);
        } else {
                buf = eeprom->data;
        }

        err = t4_seeprom_wp(adapter, false);
        if (err)
                goto out;

        for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
                err = eeprom_wr_phys(adapter, aligned_offset, *p);
                aligned_offset += 4;
        }

        if (!err)
                err = t4_seeprom_wp(adapter, true);
out:
        if (buf != eeprom->data)
                rte_free(buf);
        return err;
}
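
/*
 * Worked example (editor's note): a write of length 5 at offset 6 yields
 * aligned_offset = 6 & ~3 = 4 and aligned_len = (5 + 2 + 3) & ~3 = 8, so
 * the read-modify-write path above fetches the two surrounding words
 * covering bytes 4..11 before splicing in the caller's 5 bytes at 6..10.
 */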
1428
1429 static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev)
1430 {
1431         struct port_info *pi = eth_dev->data->dev_private;
1432         struct adapter *adapter = pi->adapter;
1433
1434         return t4_get_regs_len(adapter) / sizeof(uint32_t);
1435 }
1436
1437 static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
1438                           struct rte_dev_reg_info *regs)
1439 {
1440         struct port_info *pi = eth_dev->data->dev_private;
1441         struct adapter *adapter = pi->adapter;
1442
1443         regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) |
1444                 (CHELSIO_CHIP_RELEASE(adapter->params.chip) << 10) |
1445                 (1 << 16);
1446
1447         if (regs->data == NULL) {
1448                 regs->length = cxgbe_get_regs_len(eth_dev);
1449                 regs->width = sizeof(uint32_t);
1450
1451                 return 0;
1452         }
1453
1454         t4_get_regs(adapter, regs->data, (regs->length * sizeof(uint32_t)));
1455
1456         return 0;
1457 }
1458
1459 int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
1460 {
1461         struct port_info *pi = dev->data->dev_private;
1462         int ret;
1463
1464         ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt, (u8 *)addr);
1465         if (ret < 0) {
1466                 dev_err(adapter, "failed to set mac addr; err = %d\n",
1467                         ret);
1468                 return ret;
1469         }
1470         pi->xact_addr_filt = ret;
1471         return 0;
1472 }
1473
static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
					   struct rte_eth_fec_capa *capa_arr)
{
	int num = 0;

	if (lc->pcaps & FW_PORT_CAP32_SPEED_100G) {
		if (capa_arr) {
			capa_arr[num].speed = ETH_SPEED_NUM_100G;
			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
		}
		num++;
	}

	if (lc->pcaps & FW_PORT_CAP32_SPEED_50G) {
		if (capa_arr) {
			capa_arr[num].speed = ETH_SPEED_NUM_50G;
			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
		}
		num++;
	}

	if (lc->pcaps & FW_PORT_CAP32_SPEED_25G) {
		if (capa_arr) {
			capa_arr[num].speed = ETH_SPEED_NUM_25G;
			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
		}
		num++;
	}

	return num;
}

static int cxgbe_fec_get_capability(struct rte_eth_dev *dev,
				    struct rte_eth_fec_capa *speed_fec_capa,
				    unsigned int num)
{
	struct port_info *pi = dev->data->dev_private;
	struct link_config *lc = &pi->link_cfg;
	u8 num_entries;

	if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
		return -EOPNOTSUPP;

	num_entries = cxgbe_fec_get_capa_speed_to_fec(lc, NULL);
	if (!speed_fec_capa || num < num_entries)
		return num_entries;

	return cxgbe_fec_get_capa_speed_to_fec(lc, speed_fec_capa);
}

static int cxgbe_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
{
	struct port_info *pi = dev->data->dev_private;
	struct link_config *lc = &pi->link_cfg;
	u32 fec_caps = 0, caps = lc->link_caps;

	if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
		return -EOPNOTSUPP;

	if (caps & FW_PORT_CAP32_FEC_RS)
		fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
	else if (caps & FW_PORT_CAP32_FEC_BASER_RS)
		fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
	else
		fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);

	*fec_capa = fec_caps;
	return 0;
}

static int cxgbe_fec_set(struct rte_eth_dev *dev, uint32_t fec_capa)
{
	struct port_info *pi = dev->data->dev_private;
	u8 fec_rs = 0, fec_baser = 0, fec_none = 0;
	struct link_config *lc = &pi->link_cfg;
	u32 new_caps = lc->admin_caps;
	int ret;

	if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
		return -EOPNOTSUPP;

	if (!fec_capa)
		return -EINVAL;

	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(AUTO))
		goto set_fec;

	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC))
		fec_none = 1;

	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(BASER))
		fec_baser = 1;

	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(RS))
		fec_rs = 1;

set_fec:
	ret = t4_set_link_fec(pi, fec_rs, fec_baser, fec_none, &new_caps);
	if (ret != 0)
		return ret;

	if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC)
		new_caps |= FW_PORT_CAP32_FORCE_FEC;
	else
		new_caps &= ~FW_PORT_CAP32_FORCE_FEC;

	if (new_caps != lc->admin_caps) {
		ret = t4_link_l1cfg(pi, new_caps);
		if (ret == 0)
			lc->admin_caps = new_caps;
	}

	return ret;
}
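/*
 * A minimal usage sketch (illustrative only, assuming a probed port
 * "port_id"): applications reach the three FEC callbacks above through
 * the generic ethdev API.
 *
 *	struct rte_eth_fec_capa capa[8];
 *	uint32_t cur;
 *	int n = rte_eth_fec_get_capability(port_id, capa, RTE_DIM(capa));
 *	rte_eth_fec_get(port_id, &cur);		// current FEC mode
 *	rte_eth_fec_set(port_id, RTE_ETH_FEC_MODE_CAPA_MASK(RS));
 */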

static const struct eth_dev_ops cxgbe_eth_dev_ops = {
	.dev_start		= cxgbe_dev_start,
	.dev_stop		= cxgbe_dev_stop,
	.dev_close		= cxgbe_dev_close,
	.promiscuous_enable	= cxgbe_dev_promiscuous_enable,
	.promiscuous_disable	= cxgbe_dev_promiscuous_disable,
	.allmulticast_enable	= cxgbe_dev_allmulticast_enable,
	.allmulticast_disable	= cxgbe_dev_allmulticast_disable,
	.dev_configure		= cxgbe_dev_configure,
	.dev_infos_get		= cxgbe_dev_info_get,
	.dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
	.link_update		= cxgbe_dev_link_update,
	.dev_set_link_up	= cxgbe_dev_set_link_up,
	.dev_set_link_down	= cxgbe_dev_set_link_down,
	.mtu_set		= cxgbe_dev_mtu_set,
	.tx_queue_setup		= cxgbe_dev_tx_queue_setup,
	.tx_queue_start		= cxgbe_dev_tx_queue_start,
	.tx_queue_stop		= cxgbe_dev_tx_queue_stop,
	.tx_queue_release	= cxgbe_dev_tx_queue_release,
	.rx_queue_setup		= cxgbe_dev_rx_queue_setup,
	.rx_queue_start		= cxgbe_dev_rx_queue_start,
	.rx_queue_stop		= cxgbe_dev_rx_queue_stop,
	.rx_queue_release	= cxgbe_dev_rx_queue_release,
	.flow_ops_get		= cxgbe_dev_flow_ops_get,
	.stats_get		= cxgbe_dev_stats_get,
	.stats_reset		= cxgbe_dev_stats_reset,
	.xstats_get		= cxgbe_dev_xstats_get,
	.xstats_get_by_id	= cxgbe_dev_xstats_get_by_id,
	.xstats_get_names	= cxgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = cxgbe_dev_xstats_get_names_by_id,
	.xstats_reset		= cxgbe_dev_xstats_reset,
	.flow_ctrl_get		= cxgbe_flow_ctrl_get,
	.flow_ctrl_set		= cxgbe_flow_ctrl_set,
	.get_eeprom_length	= cxgbe_get_eeprom_length,
	.get_eeprom		= cxgbe_get_eeprom,
	.set_eeprom		= cxgbe_set_eeprom,
	.get_reg		= cxgbe_get_regs,
	.rss_hash_update	= cxgbe_dev_rss_hash_update,
	.rss_hash_conf_get	= cxgbe_dev_rss_hash_conf_get,
	.mac_addr_set		= cxgbe_mac_addr_set,
	.reta_update		= cxgbe_dev_rss_reta_update,
	.reta_query		= cxgbe_dev_rss_reta_query,
	.fec_get_capability	= cxgbe_fec_get_capability,
	.fec_get		= cxgbe_fec_get,
	.fec_set		= cxgbe_fec_set,
};

/*
 * Initialize the driver.
 * Returns 0 on success.
 */
static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = NULL;
	char name[RTE_ETH_NAME_MAX_LEN];
	int err = 0;

	CXGBE_FUNC_TRACE();

	eth_dev->dev_ops = &cxgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* For secondary processes, we attach to the ethdevs allocated by
	 * the primary process and do minimal initialization.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		int i;

		for (i = 1; i < MAX_NPORTS; i++) {
			struct rte_eth_dev *rest_eth_dev;
			char namei[RTE_ETH_NAME_MAX_LEN];

			snprintf(namei, sizeof(namei), "%s_%d",
				 pci_dev->device.name, i);
			rest_eth_dev = rte_eth_dev_attach_secondary(namei);
			if (rest_eth_dev) {
				rest_eth_dev->device = &pci_dev->device;
				rest_eth_dev->dev_ops = eth_dev->dev_ops;
				rest_eth_dev->rx_pkt_burst =
					eth_dev->rx_pkt_burst;
				rest_eth_dev->tx_pkt_burst =
					eth_dev->tx_pkt_burst;
				rte_eth_dev_probing_finish(rest_eth_dev);
			}
		}
		return 0;
	}

	snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
	adapter = rte_zmalloc(name, sizeof(*adapter), 0);
	if (!adapter)
		return -ENOMEM;

	adapter->use_unpacked_mode = 1;
	adapter->regs = (void *)pci_dev->mem_resource[0].addr;
	if (!adapter->regs) {
		dev_err(adapter, "%s: cannot map device registers\n", __func__);
		err = -ENOMEM;
		goto out_free_adapter;
	}
	adapter->pdev = pci_dev;
	adapter->eth_dev = eth_dev;
	pi->adapter = adapter;

	cxgbe_process_devargs(adapter);

	err = cxgbe_probe(adapter);
	if (err) {
		dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
			__func__, err);
		goto out_free_adapter;
	}

	return 0;

out_free_adapter:
	rte_free(adapter);
	return err;
}

static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	uint16_t port_id;
	int err = 0;

	/* Free up other ports and all resources */
	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
		err |= rte_eth_dev_close(port_id);

	return err == 0 ? 0 : -EIO;
}
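/* All ports of an adapter sit on one PCI device, so uninit closes every
 * ethdev instantiated on top of it; the shared adapter state is expected
 * to be released by the last port's close path.
 */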

static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct port_info), eth_cxgbe_dev_init);
}

static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbe_dev_uninit);
}

static struct rte_pci_driver rte_cxgbe_pmd = {
	.id_table = cxgb4_pci_tbl,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_cxgbe_pci_probe,
	.remove = eth_cxgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
			      CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
			      CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> "
			      CXGBE_DEVARG_PF_FILTER_MODE "=<uint32> "
			      CXGBE_DEVARG_PF_FILTER_MASK "=<uint32> ");
RTE_LOG_REGISTER_DEFAULT(cxgbe_logtype, NOTICE);
RTE_LOG_REGISTER_SUFFIX(cxgbe_mbox_logtype, mbox, NOTICE);