/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Chelsio Communications.
 * All rights reserved.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>

#include "cxgbe.h"
#include "cxgbe_pfvf.h"
#include "cxgbe_flow.h"

int cxgbe_logtype;
int cxgbe_mbox_logtype;

/*
 * Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
        static const struct rte_pci_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

#define PCI_VENDOR_ID_CHELSIO 0x1425

#define CH_PCI_ID_TABLE_ENTRY(devid) \
                { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
                { .vendor_id = 0, } \
        }

/*
 * ... and the PCI ID Table itself ...
 */
#include "base/t4_pci_id_tbl.h"

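/*
 * Burst-mode TX entry point. The per-queue lock serializes concurrent
 * callers, completed descriptors are reclaimed before and during the send
 * loop, and the next packet's data is prefetched one mbuf ahead to hide
 * memory latency while the current one is queued.
 */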
uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                         uint16_t nb_pkts)
{
        struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
        uint16_t pkts_sent, pkts_remain;
        uint16_t total_sent = 0;
        uint16_t idx = 0;
        int ret = 0;

        t4_os_lock(&txq->txq_lock);
        /* free up desc from already completed tx */
        reclaim_completed_tx(&txq->q);
        rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[0], volatile void *));
        while (total_sent < nb_pkts) {
                pkts_remain = nb_pkts - total_sent;

                for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
                        idx = total_sent + pkts_sent;
                        if ((idx + 1) < nb_pkts)
                                rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[idx + 1],
                                                        volatile void *));
                        ret = t4_eth_xmit(txq, tx_pkts[idx], nb_pkts);
                        if (ret < 0)
                                break;
                }
                if (!pkts_sent)
                        break;
                total_sent += pkts_sent;
                /* reclaim as much as possible */
                reclaim_completed_tx(&txq->q);
        }

        t4_os_unlock(&txq->txq_lock);
        return total_sent;
}

uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                         uint16_t nb_pkts)
{
        struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
        unsigned int work_done;

        if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
                dev_err(rxq->rspq.adapter, "error in cxgbe poll\n");

        return work_done;
}

int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
                        struct rte_eth_dev_info *device_info)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        int max_queues = adapter->sge.max_ethqsets / adapter->params.nports;

        static const struct rte_eth_desc_lim cxgbe_desc_lim = {
                .nb_max = CXGBE_MAX_RING_DESC_SIZE,
                .nb_min = CXGBE_MIN_RING_DESC_SIZE,
                .nb_align = 1,
        };

        device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
        device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
        device_info->max_rx_queues = max_queues;
        device_info->max_tx_queues = max_queues;
        device_info->max_mac_addrs = 1;
        /* XXX: For now we support one MAC/port */
        device_info->max_vfs = adapter->params.arch.vfcount;
        device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */

        device_info->rx_queue_offload_capa = 0UL;
        device_info->rx_offload_capa = CXGBE_RX_OFFLOADS;

        device_info->tx_queue_offload_capa = 0UL;
        device_info->tx_offload_capa = CXGBE_TX_OFFLOADS;

        device_info->reta_size = pi->rss_size;
        device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
        device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL;

        device_info->rx_desc_lim = cxgbe_desc_lim;
        device_info->tx_desc_lim = cxgbe_desc_lim;
        cxgbe_get_speed_caps(pi, &device_info->speed_capa);

        return 0;
}

int cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
                             1, -1, 1, -1, false);
}

int cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
                             0, -1, 1, -1, false);
}

int cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        /* TODO: address filters ?? */

        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
                             -1, 1, 1, -1, false);
}

int cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        /* TODO: address filters ?? */

        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
                             -1, 0, 1, -1, false);
}

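/*
 * Link state changes are delivered as firmware events, so the poll loop
 * below services the firmware event queue and re-checks link_ok, sleeping
 * between iterations only when the caller asked to wait for completion.
 */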
int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
                          int wait_to_complete)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;
        struct rte_eth_link new_link = { 0 };
        unsigned int i, work_done, budget = 32;
        u8 old_link = pi->link_cfg.link_ok;

        for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
                if (!s->fw_evtq.desc)
                        break;

                cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

                /* Exit if link status changed or always forced up */
                if (pi->link_cfg.link_ok != old_link ||
                    cxgbe_force_linkup(adapter))
                        break;

                if (!wait_to_complete)
                        break;

                rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
        }

        new_link.link_status = cxgbe_force_linkup(adapter) ?
                               ETH_LINK_UP : pi->link_cfg.link_ok;
        new_link.link_autoneg = pi->link_cfg.autoneg;
        new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        new_link.link_speed = pi->link_cfg.speed;

        return rte_eth_linkstatus_set(eth_dev, &new_link);
}

/**
 * Set device link up.
 */
int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        unsigned int work_done, budget = 32;
        struct sge *s = &adapter->sge;
        int ret;

        if (!s->fw_evtq.desc)
                return -ENOMEM;

        /* Flush all link events */
        cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

        /* If link already up, nothing to do */
        if (pi->link_cfg.link_ok)
                return 0;

        ret = cxgbe_set_link_status(pi, true);
        if (ret)
                return ret;

        cxgbe_dev_link_update(dev, 1);
        return 0;
}

/**
 * Set device link down.
 */
int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        unsigned int work_done, budget = 32;
        struct sge *s = &adapter->sge;
        int ret;

        if (!s->fw_evtq.desc)
                return -ENOMEM;

        /* Flush all link events */
        cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

        /* If link already down, nothing to do */
        if (!pi->link_cfg.link_ok)
                return 0;

        ret = cxgbe_set_link_status(pi, false);
        if (ret)
                return ret;

        cxgbe_dev_link_update(dev, 0);
        return 0;
}

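/*
 * Note: the value handed to t4_set_rxmode() below is the full Ethernet
 * frame length (MTU plus L2 header and CRC), not the bare L3 MTU the
 * application requested.
 */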
int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct rte_eth_dev_info dev_info;
        int err;
        uint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

        err = cxgbe_dev_info_get(eth_dev, &dev_info);
        if (err != 0)
                return err;

        /* Must accommodate at least RTE_ETHER_MIN_MTU */
        if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
                return -EINVAL;

        /* set to jumbo mode if needed */
        if (new_mtu > RTE_ETHER_MAX_LEN)
                eth_dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                eth_dev->data->dev_conf.rxmode.offloads &=
                        ~DEV_RX_OFFLOAD_JUMBO_FRAME;

        err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
                            -1, -1, true);
        if (!err)
                eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;

        return err;
}

/*
 * Close device: stop the port and clear its queues.
 */
void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        CXGBE_FUNC_TRACE();

        if (!(adapter->flags & FULL_INIT_DONE))
                return;

        cxgbe_down(pi);

        /*
         *  We clear queues only if both tx and rx path of the port
         *  have been disabled
         */
        t4_sge_eth_clear_queues(pi);
}

/* Start the device.
 * It returns 0 on success.
 */
int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct rte_eth_rxmode *rx_conf = &eth_dev->data->dev_conf.rxmode;
        struct adapter *adapter = pi->adapter;
        int err = 0, i;

        CXGBE_FUNC_TRACE();

        /*
         * If we don't have a connection to the firmware there's nothing we
         * can do.
         */
        if (!(adapter->flags & FW_OK)) {
                err = -ENXIO;
                goto out;
        }

        if (!(adapter->flags & FULL_INIT_DONE)) {
                err = cxgbe_up(adapter);
                if (err < 0)
                        goto out;
        }

        if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
                eth_dev->data->scattered_rx = 1;
        else
                eth_dev->data->scattered_rx = 0;

        cxgbe_enable_rx_queues(pi);

        err = cxgbe_setup_rss(pi);
        if (err)
                goto out;

        for (i = 0; i < pi->n_tx_qsets; i++) {
                err = cxgbe_dev_tx_queue_start(eth_dev, i);
                if (err)
                        goto out;
        }

        for (i = 0; i < pi->n_rx_qsets; i++) {
                err = cxgbe_dev_rx_queue_start(eth_dev, i);
                if (err)
                        goto out;
        }

        err = cxgbe_link_start(pi);
        if (err)
                goto out;

out:
        return err;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        CXGBE_FUNC_TRACE();

        if (!(adapter->flags & FULL_INIT_DONE))
                return;

        cxgbe_down(pi);

        /*
         *  We clear queues only if both tx and rx path of the port
         *  have been disabled
         */
        t4_sge_eth_clear_queues(pi);
        eth_dev->data->scattered_rx = 0;
}

int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        int err;

        CXGBE_FUNC_TRACE();

        if (!(adapter->flags & FW_QUEUE_BOUND)) {
                err = cxgbe_setup_sge_fwevtq(adapter);
                if (err)
                        return err;
                adapter->flags |= FW_QUEUE_BOUND;
                if (is_pf4(adapter)) {
                        err = cxgbe_setup_sge_ctrl_txq(adapter);
                        if (err)
                                return err;
                }
        }

        err = cxgbe_cfg_queue_count(eth_dev);
        if (err)
                return err;

        return 0;
}

int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
        int ret;
        struct sge_eth_txq *txq = (struct sge_eth_txq *)
                                  (eth_dev->data->tx_queues[tx_queue_id]);

        dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

        ret = t4_sge_eth_txq_start(txq);
        if (ret == 0)
                eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

        return ret;
}

int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
        int ret;
        struct sge_eth_txq *txq = (struct sge_eth_txq *)
                                  (eth_dev->data->tx_queues[tx_queue_id]);

        dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

        ret = t4_sge_eth_txq_stop(txq);
        if (ret == 0)
                eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

        return ret;
}

int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
                             uint16_t queue_idx, uint16_t nb_desc,
                             unsigned int socket_id,
                             const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;
        struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx];
        int err = 0;
        unsigned int temp_nb_desc;

        dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
                  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
                  socket_id, pi->first_qset);

        /*  Free up the existing queue  */
        if (eth_dev->data->tx_queues[queue_idx]) {
                cxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]);
                eth_dev->data->tx_queues[queue_idx] = NULL;
        }

        eth_dev->data->tx_queues[queue_idx] = (void *)txq;

        /* Sanity Checking
         *
         * nb_desc should be >= CXGBE_MIN_RING_DESC_SIZE and
         * <= CXGBE_MAX_RING_DESC_SIZE
         */
        temp_nb_desc = nb_desc;
        if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
                dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
                         __func__, CXGBE_MIN_RING_DESC_SIZE,
                         CXGBE_DEFAULT_TX_DESC_SIZE);
                temp_nb_desc = CXGBE_DEFAULT_TX_DESC_SIZE;
        } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
                dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
                        __func__, CXGBE_MIN_RING_DESC_SIZE,
                        CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_TX_DESC_SIZE);
                return -EINVAL;
        }

        txq->q.size = temp_nb_desc;

        err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
                                   s->fw_evtq.cntxt_id, socket_id);

        dev_debug(adapter, "%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\n",
                  __func__, txq->q.cntxt_id, txq->q.abs_id, err);
        return err;
}

void cxgbe_dev_tx_queue_release(void *q)
{
        struct sge_eth_txq *txq = (struct sge_eth_txq *)q;

        if (txq) {
                struct port_info *pi = (struct port_info *)
                                       (txq->eth_dev->data->dev_private);
                struct adapter *adap = pi->adapter;

                dev_debug(adap, "%s: pi->port_id = %d; tx_queue_id = %d\n",
                          __func__, pi->port_id, txq->q.cntxt_id);

                t4_sge_eth_txq_release(adap, txq);
        }
}

int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
        int ret;
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adap = pi->adapter;
        struct sge_rspq *q;

        dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
                  __func__, pi->port_id, rx_queue_id);

        q = eth_dev->data->rx_queues[rx_queue_id];

        ret = t4_sge_eth_rxq_start(adap, q);
        if (ret == 0)
                eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

        return ret;
}

int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
        int ret;
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adap = pi->adapter;
        struct sge_rspq *q;

        dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
                  __func__, pi->port_id, rx_queue_id);

        q = eth_dev->data->rx_queues[rx_queue_id];
        ret = t4_sge_eth_rxq_stop(adap, q);
        if (ret == 0)
                eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

        return ret;
}

int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
                             uint16_t queue_idx, uint16_t nb_desc,
                             unsigned int socket_id,
                             const struct rte_eth_rxconf *rx_conf __rte_unused,
                             struct rte_mempool *mp)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;
        struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset + queue_idx];
        int err = 0;
        int msi_idx = 0;
        unsigned int temp_nb_desc;
        struct rte_eth_dev_info dev_info;
        unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;

        dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
                  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
                  socket_id, mp);

        err = cxgbe_dev_info_get(eth_dev, &dev_info);
        if (err != 0) {
                dev_err(adapter, "%s: failed to get ethernet device info",
                        __func__);
                return err;
        }

        /* max_rx_pkt_len must lie within [min_rx_bufsize, max_rx_pktlen] */
        if ((pkt_len < dev_info.min_rx_bufsize) ||
            (pkt_len > dev_info.max_rx_pktlen)) {
                dev_err(adapter, "%s: max pkt len must be >= %d and <= %d\n",
                        __func__, dev_info.min_rx_bufsize,
                        dev_info.max_rx_pktlen);
                return -EINVAL;
        }

        /*  Free up the existing queue  */
        if (eth_dev->data->rx_queues[queue_idx]) {
                cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
                eth_dev->data->rx_queues[queue_idx] = NULL;
        }

        eth_dev->data->rx_queues[queue_idx] = (void *)rxq;

        /* Sanity Checking
         *
         * nb_desc should be >= CXGBE_MIN_RING_DESC_SIZE and
         * <= CXGBE_MAX_RING_DESC_SIZE
         */
        temp_nb_desc = nb_desc;
        if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
                dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
                         __func__, CXGBE_MIN_RING_DESC_SIZE,
                         CXGBE_DEFAULT_RX_DESC_SIZE);
                temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
        } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
                dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
                        __func__, CXGBE_MIN_RING_DESC_SIZE,
                        CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE);
                return -EINVAL;
        }

        rxq->rspq.size = temp_nb_desc;
        rxq->fl.size = temp_nb_desc;

        /* Set to jumbo mode if necessary */
        if (pkt_len > RTE_ETHER_MAX_LEN)
                eth_dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                eth_dev->data->dev_conf.rxmode.offloads &=
                        ~DEV_RX_OFFLOAD_JUMBO_FRAME;

        err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
                               &rxq->fl, NULL,
                               is_pf4(adapter) ?
                               t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp,
                               queue_idx, socket_id);

        dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\n",
                  __func__, err, pi->port_id, rxq->rspq.cntxt_id,
                  rxq->rspq.abs_id);
        return err;
}

void cxgbe_dev_rx_queue_release(void *q)
{
        struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;
        struct sge_rspq *rq = &rxq->rspq;

        if (rq) {
                struct port_info *pi = (struct port_info *)
                                       (rq->eth_dev->data->dev_private);
                struct adapter *adap = pi->adapter;

                dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
                          __func__, pi->port_id, rxq->rspq.cntxt_id);

                t4_sge_eth_rxq_release(adap, rxq);
        }
}

/*
 * Get port statistics.
 */
static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
                                struct rte_eth_stats *eth_stats)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;
        struct port_stats ps;
        unsigned int i;

        cxgbe_stats_get(pi, &ps);

        /* RX Stats */
        eth_stats->imissed  = ps.rx_ovflow0 + ps.rx_ovflow1 +
                              ps.rx_ovflow2 + ps.rx_ovflow3 +
                              ps.rx_trunc0 + ps.rx_trunc1 +
                              ps.rx_trunc2 + ps.rx_trunc3;
        eth_stats->ierrors  = ps.rx_symbol_err + ps.rx_fcs_err +
                              ps.rx_jabber + ps.rx_too_long + ps.rx_runt +
                              ps.rx_len_err;

        /* TX Stats */
        eth_stats->opackets = ps.tx_frames;
        eth_stats->obytes   = ps.tx_octets;
        eth_stats->oerrors  = ps.tx_error_frames;

        for (i = 0; i < pi->n_rx_qsets; i++) {
                struct sge_eth_rxq *rxq =
                        &s->ethrxq[pi->first_qset + i];

                eth_stats->q_ipackets[i] = rxq->stats.pkts;
                eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
                eth_stats->ipackets += eth_stats->q_ipackets[i];
                eth_stats->ibytes += eth_stats->q_ibytes[i];
        }

        for (i = 0; i < pi->n_tx_qsets; i++) {
                struct sge_eth_txq *txq =
                        &s->ethtxq[pi->first_qset + i];

                eth_stats->q_opackets[i] = txq->stats.pkts;
                eth_stats->q_obytes[i] = txq->stats.tx_bytes;
        }
        return 0;
}

/*
 * Reset port statistics.
 */
static int cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;
        unsigned int i;

        cxgbe_stats_reset(pi);
        for (i = 0; i < pi->n_rx_qsets; i++) {
                struct sge_eth_rxq *rxq =
                        &s->ethrxq[pi->first_qset + i];

                rxq->stats.pkts = 0;
                rxq->stats.rx_bytes = 0;
        }
        for (i = 0; i < pi->n_tx_qsets; i++) {
                struct sge_eth_txq *txq =
                        &s->ethtxq[pi->first_qset + i];

                txq->stats.pkts = 0;
                txq->stats.tx_bytes = 0;
                txq->stats.mapping_err = 0;
        }

        return 0;
}

static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
                               struct rte_eth_fc_conf *fc_conf)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct link_config *lc = &pi->link_cfg;
        int rx_pause, tx_pause;

        fc_conf->autoneg = lc->fc & PAUSE_AUTONEG;
        rx_pause = lc->fc & PAUSE_RX;
        tx_pause = lc->fc & PAUSE_TX;

        if (rx_pause && tx_pause)
                fc_conf->mode = RTE_FC_FULL;
        else if (rx_pause)
                fc_conf->mode = RTE_FC_RX_PAUSE;
        else if (tx_pause)
                fc_conf->mode = RTE_FC_TX_PAUSE;
        else
                fc_conf->mode = RTE_FC_NONE;
        return 0;
}

static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
                               struct rte_eth_fc_conf *fc_conf)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct link_config *lc = &pi->link_cfg;

        if (lc->pcaps & FW_PORT_CAP32_ANEG) {
                if (fc_conf->autoneg)
                        lc->requested_fc |= PAUSE_AUTONEG;
                else
                        lc->requested_fc &= ~PAUSE_AUTONEG;
        }

        if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
            (fc_conf->mode & RTE_FC_RX_PAUSE))
                lc->requested_fc |= PAUSE_RX;
        else
                lc->requested_fc &= ~PAUSE_RX;

        if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
            (fc_conf->mode & RTE_FC_TX_PAUSE))
                lc->requested_fc |= PAUSE_TX;
        else
                lc->requested_fc &= ~PAUSE_TX;

        return t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
                             &pi->link_cfg);
}

const uint32_t *
cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L3_IPV4,
                RTE_PTYPE_L3_IPV6,
                RTE_PTYPE_UNKNOWN
        };

        if (eth_dev->rx_pkt_burst == cxgbe_recv_pkts)
                return ptypes;
        return NULL;
}

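/*
 * Note on the RSS key handling in the two helpers below: the 40-byte
 * (ten u32) key is reversed word-by-word and byte-swapped on its way to
 * t4_write_rss_key() and back from t4_read_rss_key(), converting between
 * the ethdev byte order and the layout the key registers use.
 */
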
/* Update RSS hash configuration
 */
static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
                                     struct rte_eth_rss_conf *rss_conf)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        int err;

        err = cxgbe_write_rss_conf(pi, rss_conf->rss_hf);
        if (err)
                return err;

        pi->rss_hf = rss_conf->rss_hf;

        if (rss_conf->rss_key) {
                u32 key[10], mod_key[10];
                int i, j;

                memcpy(key, rss_conf->rss_key, CXGBE_DEFAULT_RSS_KEY_LEN);

                for (i = 9, j = 0; i >= 0; i--, j++)
                        mod_key[j] = cpu_to_be32(key[i]);

                t4_write_rss_key(adapter, mod_key, -1);
        }

        return 0;
}

/* Get RSS hash configuration
 */
static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                                       struct rte_eth_rss_conf *rss_conf)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        u64 rss_hf = 0;
        u64 flags = 0;
        int err;

        err = t4_read_config_vi_rss(adapter, adapter->mbox, pi->viid,
                                    &flags, NULL);

        if (err)
                return err;

        if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) {
                rss_hf |= CXGBE_RSS_HF_TCP_IPV6_MASK;
                if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
                        rss_hf |= CXGBE_RSS_HF_UDP_IPV6_MASK;
        }

        if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
                rss_hf |= CXGBE_RSS_HF_IPV6_MASK;

        if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
                rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
                if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
                        rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
        }

        if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
                rss_hf |= CXGBE_RSS_HF_IPV4_MASK;

        rss_conf->rss_hf = rss_hf;

        if (rss_conf->rss_key) {
                u32 key[10], mod_key[10];
                int i, j;

                t4_read_rss_key(adapter, key);

                for (i = 9, j = 0; i >= 0; i--, j++)
                        mod_key[j] = be32_to_cpu(key[i]);

                memcpy(rss_conf->rss_key, mod_key, CXGBE_DEFAULT_RSS_KEY_LEN);
        }

        return 0;
}

static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
{
        RTE_SET_USED(dev);
        return EEPROMSIZE;
}

/**
 * eeprom_ptov - translate a physical EEPROM address to virtual
 * @phys_addr: the physical EEPROM address
 * @fn: the PCI function number
 * @sz: size of function-specific area
 *
 * Translate a physical EEPROM address to virtual.  The first 1K is
 * accessed through virtual addresses starting at 31K, the rest is
 * accessed through virtual addresses starting at 0.
 *
 * The mapping is as follows:
 * [0..1K) -> [31K..32K)
 * [1K..1K+A) -> [31K-A..31K)
 * [1K+A..ES) -> [0..ES-A-1K)
 *
 * where A = @fn * @sz, and ES = EEPROM size.  Physical addresses at or
 * beyond ES have no mapping.
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
        fn *= sz;
        if (phys_addr < 1024)
                return phys_addr + (31 << 10);
        if (phys_addr < 1024 + fn)
                return fn + phys_addr - 1024;
        if (phys_addr < EEPROMSIZE)
                return phys_addr - 1024 - fn;
        return -EINVAL;
}
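
/*
 * Worked example for eeprom_ptov() with fn = 0 (so A = 0): physical
 * address 0x0 maps to virtual 31K (0x7C00), physical 0x3FF to 0x7FFF,
 * and from physical 1K (0x400) onwards addresses map straight down to
 * virtual 0x0, 0x1, ... up to EEPROMSIZE - 1K.
 */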

/* The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
        int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

        if (vaddr >= 0)
                vaddr = t4_seeprom_read(adap, vaddr, v);
        return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
        int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);

        if (vaddr >= 0)
                vaddr = t4_seeprom_write(adap, vaddr, v);
        return vaddr < 0 ? vaddr : 0;
}

#define EEPROM_MAGIC 0x38E2F10C

static int cxgbe_get_eeprom(struct rte_eth_dev *dev,
                            struct rte_dev_eeprom_info *e)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        u32 i, err = 0;
        u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0);

        if (!buf)
                return -ENOMEM;

        e->magic = EEPROM_MAGIC;
        for (i = e->offset & ~3; !err && i < e->offset + e->length; i += 4)
                err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

        if (!err)
                rte_memcpy(e->data, buf + e->offset, e->length);
        rte_free(buf);
        return err;
}

static int cxgbe_set_eeprom(struct rte_eth_dev *dev,
                            struct rte_dev_eeprom_info *eeprom)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        u8 *buf;
        int err = 0;
        u32 aligned_offset, aligned_len, *p;

        if (eeprom->magic != EEPROM_MAGIC)
                return -EINVAL;

        aligned_offset = eeprom->offset & ~3;
        aligned_len = (eeprom->length + (eeprom->offset & 3) + 3) & ~3;

        if (adapter->pf > 0) {
                u32 start = 1024 + adapter->pf * EEPROMPFSIZE;

                if (aligned_offset < start ||
                    aligned_offset + aligned_len > start + EEPROMPFSIZE)
                        return -EPERM;
        }

        if (aligned_offset != eeprom->offset || aligned_len != eeprom->length) {
                /* RMW possibly needed for first or last words.
                 */
                buf = rte_zmalloc(NULL, aligned_len, 0);
                if (!buf)
                        return -ENOMEM;
                err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
                if (!err && aligned_len > 4)
                        err = eeprom_rd_phys(adapter,
                                             aligned_offset + aligned_len - 4,
                                             (u32 *)&buf[aligned_len - 4]);
                if (err)
                        goto out;
                rte_memcpy(buf + (eeprom->offset & 3), eeprom->data,
                           eeprom->length);
        } else {
                buf = eeprom->data;
        }

        err = t4_seeprom_wp(adapter, false);
        if (err)
                goto out;

        for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
                err = eeprom_wr_phys(adapter, aligned_offset, *p);
                aligned_offset += 4;
        }

        if (!err)
                err = t4_seeprom_wp(adapter, true);
out:
        if (buf != eeprom->data)
                rte_free(buf);
        return err;
}

static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        return t4_get_regs_len(adapter) / sizeof(uint32_t);
}

static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
                          struct rte_dev_reg_info *regs)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;

        regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) |
                (CHELSIO_CHIP_RELEASE(adapter->params.chip) << 10) |
                (1 << 16);

        if (regs->data == NULL) {
                regs->length = cxgbe_get_regs_len(eth_dev);
                regs->width = sizeof(uint32_t);

                return 0;
        }

        t4_get_regs(adapter, regs->data, (regs->length * sizeof(uint32_t)));

        return 0;
}

int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
        struct port_info *pi = dev->data->dev_private;
        int ret;

        ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt, (u8 *)addr);
        if (ret < 0) {
                dev_err(pi->adapter, "failed to set mac addr; err = %d\n",
                        ret);
                return ret;
        }
        pi->xact_addr_filt = ret;
        return 0;
}

static const struct eth_dev_ops cxgbe_eth_dev_ops = {
        .dev_start              = cxgbe_dev_start,
        .dev_stop               = cxgbe_dev_stop,
        .dev_close              = cxgbe_dev_close,
        .promiscuous_enable     = cxgbe_dev_promiscuous_enable,
        .promiscuous_disable    = cxgbe_dev_promiscuous_disable,
        .allmulticast_enable    = cxgbe_dev_allmulticast_enable,
        .allmulticast_disable   = cxgbe_dev_allmulticast_disable,
        .dev_configure          = cxgbe_dev_configure,
        .dev_infos_get          = cxgbe_dev_info_get,
        .dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
        .link_update            = cxgbe_dev_link_update,
        .dev_set_link_up        = cxgbe_dev_set_link_up,
        .dev_set_link_down      = cxgbe_dev_set_link_down,
        .mtu_set                = cxgbe_dev_mtu_set,
        .tx_queue_setup         = cxgbe_dev_tx_queue_setup,
        .tx_queue_start         = cxgbe_dev_tx_queue_start,
        .tx_queue_stop          = cxgbe_dev_tx_queue_stop,
        .tx_queue_release       = cxgbe_dev_tx_queue_release,
        .rx_queue_setup         = cxgbe_dev_rx_queue_setup,
        .rx_queue_start         = cxgbe_dev_rx_queue_start,
        .rx_queue_stop          = cxgbe_dev_rx_queue_stop,
        .rx_queue_release       = cxgbe_dev_rx_queue_release,
        .filter_ctrl            = cxgbe_dev_filter_ctrl,
        .stats_get              = cxgbe_dev_stats_get,
        .stats_reset            = cxgbe_dev_stats_reset,
        .flow_ctrl_get          = cxgbe_flow_ctrl_get,
        .flow_ctrl_set          = cxgbe_flow_ctrl_set,
        .get_eeprom_length      = cxgbe_get_eeprom_length,
        .get_eeprom             = cxgbe_get_eeprom,
        .set_eeprom             = cxgbe_set_eeprom,
        .get_reg                = cxgbe_get_regs,
        .rss_hash_update        = cxgbe_dev_rss_hash_update,
        .rss_hash_conf_get      = cxgbe_dev_rss_hash_conf_get,
        .mac_addr_set           = cxgbe_mac_addr_set,
};

/*
 * Initialize driver
 * It returns 0 on success.
 */
static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev;
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = NULL;
        char name[RTE_ETH_NAME_MAX_LEN];
        int err = 0;

        CXGBE_FUNC_TRACE();

        eth_dev->dev_ops = &cxgbe_eth_dev_ops;
        eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
        eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

        /* for secondary processes, we attach to ethdevs allocated by primary
         * and do minimal initialization.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                int i;

                for (i = 1; i < MAX_NPORTS; i++) {
                        struct rte_eth_dev *rest_eth_dev;
                        char namei[RTE_ETH_NAME_MAX_LEN];

                        snprintf(namei, sizeof(namei), "%s_%d",
                                 pci_dev->device.name, i);
                        rest_eth_dev = rte_eth_dev_attach_secondary(namei);
                        if (rest_eth_dev) {
                                rest_eth_dev->device = &pci_dev->device;
                                rest_eth_dev->dev_ops =
                                        eth_dev->dev_ops;
                                rest_eth_dev->rx_pkt_burst =
                                        eth_dev->rx_pkt_burst;
                                rest_eth_dev->tx_pkt_burst =
                                        eth_dev->tx_pkt_burst;
                                rte_eth_dev_probing_finish(rest_eth_dev);
                        }
                }
                return 0;
        }

        snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
        adapter = rte_zmalloc(name, sizeof(*adapter), 0);
        if (!adapter)
                return -1;

        adapter->use_unpacked_mode = 1;
        adapter->regs = (void *)pci_dev->mem_resource[0].addr;
        if (!adapter->regs) {
                dev_err(adapter, "%s: cannot map device registers\n", __func__);
                err = -ENOMEM;
                goto out_free_adapter;
        }
        adapter->pdev = pci_dev;
        adapter->eth_dev = eth_dev;
        pi->adapter = adapter;

        cxgbe_process_devargs(adapter);

        err = cxgbe_probe(adapter);
        if (err) {
                dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
                        __func__, err);
                goto out_free_adapter;
        }

        return 0;

out_free_adapter:
        rte_free(adapter);
        return err;
}

static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adap = pi->adapter;

        /* Free up other ports and all resources */
        cxgbe_close(adap);
        return 0;
}

static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                sizeof(struct port_info), eth_cxgbe_dev_init);
}

static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbe_dev_uninit);
}

static struct rte_pci_driver rte_cxgbe_pmd = {
        .id_table = cxgb4_pci_tbl,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = eth_cxgbe_pci_probe,
        .remove = eth_cxgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
                              CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
                              CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> ");
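
/*
 * Example of passing the devargs registered above on the EAL command line,
 * using a hypothetical PCI address and assuming the devarg keys expand to
 * "keep_ovlan" and "tx_mode_latency":
 *   -w 02:00.0,keep_ovlan=1,tx_mode_latency=1
 */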

RTE_INIT(cxgbe_init_log)
{
        cxgbe_logtype = rte_log_register("pmd.net.cxgbe");
        if (cxgbe_logtype >= 0)
                rte_log_set_level(cxgbe_logtype, RTE_LOG_NOTICE);
        cxgbe_mbox_logtype = rte_log_register("pmd.net.cxgbe.mbox");
        if (cxgbe_mbox_logtype >= 0)
                rte_log_set_level(cxgbe_mbox_logtype, RTE_LOG_NOTICE);
}