net/cxgbe: add API to program hardware layer 2 table

drivers/net/cxgbe/cxgbe_main.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Chelsio Communications.
 * All rights reserved.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_kvargs.h>

#include "common.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgbe.h"
#include "clip_tbl.h"
#include "l2t.h"

/**
 * Allocate a chunk of memory. The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
        return rte_zmalloc(NULL, size, 0);
}

/**
 * Free memory allocated through t4_alloc_mem().
 */
void t4_free_mem(void *addr)
{
        rte_free(addr);
}

/*
 * Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          __rte_unused const struct pkt_gl *gl)
{
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++;                                          /* skip RSS header */

        /*
         * FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
         */
        if (unlikely(opcode == CPL_FW4_MSG &&
                     ((const struct cpl_fw4_msg *)rsp)->type ==
                      FW_TYPE_RSSCPL)) {
                rsp++;
                opcode = ((const struct rss_header *)rsp)->opcode;
                rsp++;
                if (opcode != CPL_SGE_EGR_UPDATE) {
                        dev_err(q->adapter, "unexpected FW4/CPL %#x on FW event queue\n",
                                opcode);
                        goto out;
                }
        }

        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                /* do nothing */
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *msg = (const void *)rsp;

                t4_handle_fw_rpl(q->adapter, msg->data);
        } else if (opcode == CPL_ABORT_RPL_RSS) {
                const struct cpl_abort_rpl_rss *p = (const void *)rsp;

                hash_del_filter_rpl(q->adapter, p);
        } else if (opcode == CPL_SET_TCB_RPL) {
                const struct cpl_set_tcb_rpl *p = (const void *)rsp;

                filter_rpl(q->adapter, p);
        } else if (opcode == CPL_ACT_OPEN_RPL) {
                const struct cpl_act_open_rpl *p = (const void *)rsp;

                hash_filter_rpl(q->adapter, p);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (const void *)rsp;

                do_l2t_write_rpl(q->adapter, p);
        } else {
                dev_err(q->adapter, "unexpected CPL %#x on FW event queue\n",
                        opcode);
        }
out:
        return 0;
}

/**
 * Set up the SGE control Tx queues used to pass control information.
 */
int setup_sge_ctrl_txq(struct adapter *adapter)
{
        struct sge *s = &adapter->sge;
        int err = 0, i = 0;

        for_each_port(adapter, i) {
                char name[RTE_ETH_NAME_MAX_LEN];
                struct sge_ctrl_txq *q = &s->ctrlq[i];

                q->q.size = 1024;
                err = t4_sge_alloc_ctrl_txq(adapter, q,
                                            adapter->eth_dev, i,
                                            s->fw_evtq.cntxt_id,
                                            rte_socket_id());
                if (err) {
                        dev_err(adapter, "Failed to alloc ctrl txq. Err: %d",
                                err);
                        goto out;
                }
                snprintf(name, sizeof(name), "cxgbe_ctrl_pool_%d", i);
                q->mb_pool = rte_pktmbuf_pool_create(name, s->ctrlq[i].q.size,
                                                     RTE_CACHE_LINE_SIZE,
                                                     RTE_MBUF_PRIV_ALIGN,
                                                     RTE_MBUF_DEFAULT_BUF_SIZE,
                                                     SOCKET_ID_ANY);
                if (!q->mb_pool) {
                        dev_err(adapter, "Can't create ctrl pool for port: %d",
                                i);
                        err = -ENOMEM;
                        goto out;
                }
        }
        return 0;
out:
        t4_free_sge_resources(adapter);
        return err;
}

/**
 * cxgbe_poll_for_completion: Poll rxq for completion
 * @q: rxq to poll
 * @us: microseconds to delay between polls
 * @cnt: number of times to poll
 * @c: completion to check for 'done' status
 *
 * Polls the rxq for replies until the completion is marked done or the
 * poll count expires.
 */
int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us,
                              unsigned int cnt, struct t4_completion *c)
{
        unsigned int i;
        unsigned int work_done, budget = 4;

        if (!c)
                return -EINVAL;

        for (i = 0; i < cnt; i++) {
                cxgbe_poll(q, NULL, budget, &work_done);
                t4_os_lock(&c->lock);
                if (c->done) {
                        t4_os_unlock(&c->lock);
                        return 0;
                }
                t4_os_unlock(&c->lock);
                udelay(us);
        }
        return -ETIMEDOUT;
}
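
/*
 * Example: a minimal, hypothetical usage sketch for the helper above.  A
 * caller initializes a t4_completion, posts a request whose reply handler
 * sets c->done under c->lock, then polls the firmware event queue for up
 * to 100 iterations of 100us each.  post_my_request() is illustrative,
 * not part of this driver.
 *
 *      struct t4_completion c;
 *
 *      memset(&c, 0, sizeof(c));
 *      t4_os_lock_init(&c.lock);
 *      post_my_request(adap, &c);
 *      if (cxgbe_poll_for_completion(&adap->sge.fw_evtq, 100, 100, &c))
 *              dev_err(adap, "request timed out\n");
 */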

int setup_sge_fwevtq(struct adapter *adapter)
{
        struct sge *s = &adapter->sge;
        int err = 0;
        int msi_idx = 0;

        err = t4_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->eth_dev,
                               msi_idx, NULL, fwevtq_handler, -1, NULL, 0,
                               rte_socket_id());
        return err;
}

static int closest_timer(const struct sge *s, int time)
{
        unsigned int i, match = 0;
        int delta, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
                delta = time - s->timer_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}

static int closest_thres(const struct sge *s, int thres)
{
        unsigned int i, match = 0;
        int delta, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
                delta = thres - s->counter_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}
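
/*
 * Worked example for the nearest-index helpers above (values illustrative,
 * not the adapter defaults): with timer_val[] = {1, 5, 10, 50, 100, 200}
 * microseconds, closest_timer(s, 8) computes |8 - v| for each entry, finds
 * the smallest delta (2, at 10us) and returns index 2.  closest_thres()
 * applies the same search to counter_val[] for packet counts.
 */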

/**
 * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 * @q: the Rx queue
 * @us: the hold-off time in us, or 0 to disable timer
 * @cnt: the hold-off packet count, or 0 to disable counter
 *
 * Sets an Rx queue's interrupt hold-off time and packet count.  At least
 * one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
                               unsigned int cnt)
{
        struct adapter *adap = q->adapter;
        unsigned int timer_val;

        if (cnt) {
                int err;
                u32 v, new_idx;

                new_idx = closest_thres(&adap->sge, cnt);
                if (q->desc && q->pktcnt_idx != new_idx) {
                        /* the queue has already been created, update it */
                        v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
                            V_FW_PARAMS_PARAM_X(
                            FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
                            V_FW_PARAMS_PARAM_YZ(q->cntxt_id);
                        err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
                                            &v, &new_idx);
                        if (err)
                                return err;
                }
                q->pktcnt_idx = new_idx;
        }

        timer_val = (us == 0) ? X_TIMERREG_RESTART_COUNTER :
                                closest_timer(&adap->sge, us);

        if ((us | cnt) == 0)
                q->intr_params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX);
        else
                q->intr_params = V_QINTR_TIMER_IDX(timer_val) |
                                 V_QINTR_CNT_EN(cnt > 0);
        return 0;
}
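
/*
 * Example: a hypothetical coalescing setup for one Ethernet Rx queue,
 * asking for an interrupt after roughly 5us or 32 packets, whichever
 * comes first.  The helpers above map both values onto the closest
 * settings the hardware actually supports.
 *
 *      struct sge_eth_rxq *rxq = &adap->sge.ethrxq[0];
 *      int err = cxgb4_set_rspq_intr_params(&rxq->rspq, 5, 32);
 *
 *      if (err)
 *              dev_err(adap, "holdoff update failed: %d\n", err);
 */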

/**
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgbe_alloc_atid(struct tid_info *t, void *data)
{
        int atid = -1;

        t4_os_lock(&t->atid_lock);
        if (t->afree) {
                union aopen_entry *p = t->afree;

                atid = p - t->atid_tab;
                t->afree = p->next;
                p->data = data;
                t->atids_in_use++;
        }
        t4_os_unlock(&t->atid_lock);
        return atid;
}

/**
 * Release an active-open TID.
 */
void cxgbe_free_atid(struct tid_info *t, unsigned int atid)
{
        union aopen_entry *p = &t->atid_tab[atid];

        t4_os_lock(&t->atid_lock);
        p->next = t->afree;
        t->afree = p;
        t->atids_in_use--;
        t4_os_unlock(&t->atid_lock);
}
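
/*
 * Example: a minimal, hypothetical active-open TID round trip.  The atid
 * doubles as an index back into t->atid_tab, so the caller's context
 * pointer can be recovered when the firmware reply arrives; "ctx" is
 * illustrative.
 *
 *      int atid = cxgbe_alloc_atid(&adap->tids, ctx);
 *
 *      if (atid < 0)
 *              return -ENOMEM;         (free list exhausted)
 *      ... send a CPL request carrying atid; once the reply is handled ...
 *      cxgbe_free_atid(&adap->tids, atid);
 */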

/**
 * Populate a TID_RELEASE WR.  The caller must properly size the mbuf.
 */
static void mk_tid_release(struct rte_mbuf *mbuf, unsigned int tid)
{
        struct cpl_tid_release *req;

        req = rte_pktmbuf_mtod(mbuf, struct cpl_tid_release *);
        INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
}

/**
 * Release a TID and inform HW.  If allocation of the release message
 * fails, the hardware is simply not notified.
 */
void cxgbe_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
                      unsigned short family)
{
        struct rte_mbuf *mbuf;
        struct adapter *adap = container_of(t, struct adapter, tids);

        WARN_ON(tid >= t->ntids);

        if (t->tid_tab[tid]) {
                t->tid_tab[tid] = NULL;
                rte_atomic32_dec(&t->conns_in_use);
                if (t->hash_base && tid >= t->hash_base) {
                        if (family == FILTER_TYPE_IPV4)
                                rte_atomic32_dec(&t->hash_tids_in_use);
                } else {
                        if (family == FILTER_TYPE_IPV4)
                                rte_atomic32_dec(&t->tids_in_use);
                }
        }

        mbuf = rte_pktmbuf_alloc((&adap->sge.ctrlq[chan])->mb_pool);
        if (mbuf) {
                mbuf->data_len = sizeof(struct cpl_tid_release);
                mbuf->pkt_len = mbuf->data_len;
                mk_tid_release(mbuf, tid);
                t4_mgmt_tx(&adap->sge.ctrlq[chan], mbuf);
        }
}

/**
 * Insert a TID.
 */
void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
                      unsigned short family)
{
        t->tid_tab[tid] = data;
        if (t->hash_base && tid >= t->hash_base) {
                if (family == FILTER_TYPE_IPV4)
                        rte_atomic32_inc(&t->hash_tids_in_use);
        } else {
                if (family == FILTER_TYPE_IPV4)
                        rte_atomic32_inc(&t->tids_in_use);
        }

        rte_atomic32_inc(&t->conns_in_use);
}

/**
 * Free TID tables.
 */
static void tid_free(struct tid_info *t)
{
        if (t->tid_tab) {
                if (t->ftid_bmap)
                        rte_bitmap_free(t->ftid_bmap);

                if (t->ftid_bmap_array)
                        t4_os_free(t->ftid_bmap_array);

                t4_os_free(t->tid_tab);
        }

        memset(t, 0, sizeof(struct tid_info));
}
/**
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
        size_t size;
        unsigned int ftid_bmap_size;
        unsigned int natids = t->natids;
        unsigned int max_ftids = t->nftids;

        ftid_bmap_size = rte_bitmap_get_memory_footprint(t->nftids);
        size = t->ntids * sizeof(*t->tid_tab) +
                max_ftids * sizeof(*t->ftid_tab) +
                natids * sizeof(*t->atid_tab);

        t->tid_tab = t4_os_alloc(size);
        if (!t->tid_tab)
                return -ENOMEM;

        t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
        t->ftid_tab = (struct filter_entry *)&t->atid_tab[t->natids];
        t->ftid_bmap_array = t4_os_alloc(ftid_bmap_size);
        if (!t->ftid_bmap_array) {
                tid_free(t);
                return -ENOMEM;
        }

        t4_os_lock_init(&t->atid_lock);
        t4_os_lock_init(&t->ftid_lock);

        t->afree = NULL;
        t->atids_in_use = 0;
        rte_atomic32_init(&t->tids_in_use);
        rte_atomic32_set(&t->tids_in_use, 0);
        rte_atomic32_init(&t->conns_in_use);
        rte_atomic32_set(&t->conns_in_use, 0);

        /* Set up the free list for atid_tab and clear the ftid bitmap. */
        if (natids) {
                while (--natids)
                        t->atid_tab[natids - 1].next = &t->atid_tab[natids];
                t->afree = t->atid_tab;
        }

        t->ftid_bmap = rte_bitmap_init(t->nftids, t->ftid_bmap_array,
                                       ftid_bmap_size);
        if (!t->ftid_bmap) {
                tid_free(t);
                return -ENOMEM;
        }

        return 0;
}
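
/*
 * Layout of the single block allocated by tid_init() above (element
 * counts, not bytes):
 *
 *      t->tid_tab  -> ntids  entries of void *              (connection TIDs)
 *      t->atid_tab -> natids entries of union aopen_entry   (active-open TIDs)
 *      t->ftid_tab -> nftids entries of struct filter_entry (filter TIDs)
 *
 * All three tables are carved out of one contiguous allocation, which is
 * why atid_tab starts at &tid_tab[ntids] and ftid_tab at &atid_tab[natids].
 */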

static inline bool is_x_1g_port(const struct link_config *lc)
{
        return (lc->pcaps & FW_PORT_CAP32_SPEED_1G) != 0;
}

static inline bool is_x_10g_port(const struct link_config *lc)
{
        unsigned int speeds, high_speeds;

        speeds = V_FW_PORT_CAP32_SPEED(G_FW_PORT_CAP32_SPEED(lc->pcaps));
        high_speeds = speeds &
                      ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);

        return high_speeds != 0;
}

inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
                      unsigned int us, unsigned int cnt,
                      unsigned int size, unsigned int iqe_size)
{
        q->adapter = adap;
        cxgb4_set_rspq_intr_params(q, us, cnt);
        q->iqe_len = iqe_size;
        q->size = size;
}

int cfg_queue_count(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
        struct adapter *adap = pi->adapter;
        struct sge *s = &adap->sge;
        unsigned int max_queues = s->max_ethqsets / adap->params.nports;

        if ((eth_dev->data->nb_rx_queues < 1) ||
            (eth_dev->data->nb_tx_queues < 1))
                return -EINVAL;

        if ((eth_dev->data->nb_rx_queues > max_queues) ||
            (eth_dev->data->nb_tx_queues > max_queues))
                return -EINVAL;

        if (eth_dev->data->nb_rx_queues > pi->rss_size)
                return -EINVAL;

        /* We must configure RSS, since config has changed */
        pi->flags &= ~PORT_RSS_DONE;

        pi->n_rx_qsets = eth_dev->data->nb_rx_queues;
        pi->n_tx_qsets = eth_dev->data->nb_tx_queues;

        return 0;
}
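
/*
 * Example: the checks above run on the ethdev configure path.  A
 * hypothetical application requesting 4 Rx and 4 Tx queues:
 *
 *      struct rte_eth_conf port_conf;
 *
 *      memset(&port_conf, 0, sizeof(port_conf));
 *      ret = rte_eth_dev_configure(port_id, 4, 4, &port_conf);
 *
 * cfg_queue_count() returns -EINVAL here if 4 exceeds the per-port
 * max_queues or pi->rss_size.
 */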

void cfg_queues(struct rte_eth_dev *eth_dev)
{
        struct rte_config *config = rte_eal_get_configuration();
        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
        struct adapter *adap = pi->adapter;
        struct sge *s = &adap->sge;
        unsigned int i, nb_ports = 0, qidx = 0;
        unsigned int q_per_port = 0;

        if (!(adap->flags & CFG_QUEUES)) {
                for_each_port(adap, i) {
                        struct port_info *tpi = adap2pinfo(adap, i);

                        nb_ports += (is_x_10g_port(&tpi->link_cfg)) ||
                                     is_x_1g_port(&tpi->link_cfg) ? 1 : 0;
                }

                /*
                 * By default, allocate up to as many queue sets per 1G/10G
                 * port as there are lcores.
                 */
                if (nb_ports)
                        q_per_port = (s->max_ethqsets -
                                     (adap->params.nports - nb_ports)) /
                                     nb_ports;

                if (q_per_port > config->lcore_count)
                        q_per_port = config->lcore_count;

                for_each_port(adap, i) {
                        struct port_info *pi = adap2pinfo(adap, i);

                        pi->first_qset = qidx;

                        /* Initially n_rx_qsets == n_tx_qsets */
                        pi->n_rx_qsets = (is_x_10g_port(&pi->link_cfg) ||
                                          is_x_1g_port(&pi->link_cfg)) ?
                                          q_per_port : 1;
                        pi->n_tx_qsets = pi->n_rx_qsets;

                        if (pi->n_rx_qsets > pi->rss_size)
                                pi->n_rx_qsets = pi->rss_size;

                        qidx += pi->n_rx_qsets;
                }

                for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
                        struct sge_eth_rxq *r = &s->ethrxq[i];

                        init_rspq(adap, &r->rspq, 5, 32, 1024, 64);
                        r->usembufs = 1;
                        r->fl.size = (r->usembufs ? 1024 : 72);
                }

                for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
                        s->ethtxq[i].q.size = 1024;

                init_rspq(adap, &adap->sge.fw_evtq, 0, 0, 1024, 64);
                adap->flags |= CFG_QUEUES;
        }
}

void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats)
{
        t4_get_port_stats_offset(pi->adapter, pi->tx_chan, stats,
                                 &pi->stats_base);
}

void cxgbe_stats_reset(struct port_info *pi)
{
        t4_clr_port_stats(pi->adapter, pi->tx_chan);
}

static void setup_memwin(struct adapter *adap)
{
        u32 mem_win0_base;

        /* For T5, only relative offset inside the PCIe BAR is passed */
        mem_win0_base = MEMWIN0_BASE;

        /*
         * Set up memory window for accessing adapter memory ranges.  (Read
         * back MA register to ensure that changes propagate before we attempt
         * to use the new values.)
         */
        t4_write_reg(adap,
                     PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
                                         MEMWIN_NIC),
                     mem_win0_base | V_BIR(0) |
                     V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT));
        t4_read_reg(adap,
                    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
                                        MEMWIN_NIC));
}

int init_rss(struct adapter *adap)
{
        unsigned int i;

        if (is_pf4(adap)) {
                int err;

                err = t4_init_rss_mode(adap, adap->mbox);
                if (err)
                        return err;
        }

        for_each_port(adap, i) {
                struct port_info *pi = adap2pinfo(adap, i);

                pi->rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
                if (!pi->rss)
                        return -ENOMEM;

                pi->rss_hf = CXGBE_RSS_HF_ALL;
        }
        return 0;
}

/**
 * Dump basic information about the adapter.
 */
void print_adapter_info(struct adapter *adap)
{
        /**
         * Hardware/Firmware/etc. Version/Revision IDs.
         */
        t4_dump_version_info(adap);
}

void print_port_info(struct adapter *adap)
{
        int i;
        char buf[80];
        struct rte_pci_addr *loc = &adap->pdev->addr;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);
                char *bufp = buf;

                if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
                        bufp += sprintf(bufp, "100M/");
                if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
                        bufp += sprintf(bufp, "1G/");
                if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
                        bufp += sprintf(bufp, "10G/");
                if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
                        bufp += sprintf(bufp, "25G/");
                if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
                        bufp += sprintf(bufp, "40G/");
                if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
                        bufp += sprintf(bufp, "50G/");
                if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
                        bufp += sprintf(bufp, "100G/");
                if (bufp != buf)
                        --bufp;
                sprintf(bufp, "BASE-%s",
                        t4_get_port_type_description(
                                        (enum fw_port_type)pi->port_type));

                dev_info(adap,
                         " " PCI_PRI_FMT " Chelsio rev %d %s %s\n",
                         loc->domain, loc->bus, loc->devid, loc->function,
                         CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
                         (adap->flags & USING_MSIX) ? " MSI-X" :
                         (adap->flags & USING_MSI) ? " MSI" : "");
        }
}

static int
check_devargs_handler(__rte_unused const char *key, const char *value,
                      __rte_unused void *opaque)
{
        if (strcmp(value, "1"))
                return -1;

        return 0;
}

int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key)
{
        struct rte_kvargs *kvlist;

        if (!devargs)
                return 0;

        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (!kvlist)
                return 0;

        if (!rte_kvargs_count(kvlist, key)) {
                rte_kvargs_free(kvlist);
                return 0;
        }

        if (rte_kvargs_process(kvlist, key,
                               check_devargs_handler, NULL) < 0) {
                rte_kvargs_free(kvlist);
                return 0;
        }
        rte_kvargs_free(kvlist);

        return 1;
}
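
/*
 * Example: how these devargs reach the driver from the EAL command line.
 * The keys are boolean and must be set to "1"; any other value is
 * rejected by check_devargs_handler().  The PCI address is illustrative:
 *
 *      testpmd -w 02:00.4,keep_ovlan=1,force_link_up=1 -- -i
 */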

static void configure_vlan_types(struct adapter *adapter)
{
        struct rte_pci_device *pdev = adapter->pdev;
        int i;

        for_each_port(adapter, i) {
                /* OVLAN Type 0x88a8 */
                t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN0),
                                 V_OVLAN_MASK(M_OVLAN_MASK) |
                                 V_OVLAN_ETYPE(M_OVLAN_ETYPE),
                                 V_OVLAN_MASK(M_OVLAN_MASK) |
                                 V_OVLAN_ETYPE(0x88a8));
                /* OVLAN Type 0x9100 */
                t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN1),
                                 V_OVLAN_MASK(M_OVLAN_MASK) |
                                 V_OVLAN_ETYPE(M_OVLAN_ETYPE),
                                 V_OVLAN_MASK(M_OVLAN_MASK) |
                                 V_OVLAN_ETYPE(0x9100));
                /* OVLAN Type 0x8100 */
                t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN2),
                                 V_OVLAN_MASK(M_OVLAN_MASK) |
                                 V_OVLAN_ETYPE(M_OVLAN_ETYPE),
                                 V_OVLAN_MASK(M_OVLAN_MASK) |
                                 V_OVLAN_ETYPE(0x8100));

                /* IVLAN Type 0x8100 */
                t4_set_reg_field(adapter, MPS_PORT_RX_IVLAN(i),
                                 V_IVLAN_ETYPE(M_IVLAN_ETYPE),
                                 V_IVLAN_ETYPE(0x8100));

                t4_set_reg_field(adapter, MPS_PORT_RX_CTL(i),
                                 F_OVLAN_EN0 | F_OVLAN_EN1 |
                                 F_OVLAN_EN2 | F_IVLAN_EN,
                                 F_OVLAN_EN0 | F_OVLAN_EN1 |
                                 F_OVLAN_EN2 | F_IVLAN_EN);
        }

        if (cxgbe_get_devargs(pdev->device.devargs, CXGBE_DEVARG_KEEP_OVLAN))
                t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG,
                                       V_RM_OVLAN(1), V_RM_OVLAN(0));
}

static void configure_pcie_ext_tag(struct adapter *adapter)
{
        u16 v;
        int pos = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);

        if (!pos)
                return;

        t4_os_pci_read_cfg2(adapter, pos + PCI_EXP_DEVCTL, &v);
        v |= PCI_EXP_DEVCTL_EXT_TAG;
        t4_os_pci_write_cfg2(adapter, pos + PCI_EXP_DEVCTL, v);
        if (is_t6(adapter->params.chip)) {
                t4_set_reg_field(adapter, A_PCIE_CFG2,
                                 V_T6_TOTMAXTAG(M_T6_TOTMAXTAG),
                                 V_T6_TOTMAXTAG(7));
                t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
                                 V_T6_MINTAG(M_T6_MINTAG),
                                 V_T6_MINTAG(8));
        } else {
                t4_set_reg_field(adapter, A_PCIE_CFG2,
                                 V_TOTMAXTAG(M_TOTMAXTAG),
                                 V_TOTMAXTAG(3));
                t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
                                 V_MINTAG(M_MINTAG),
                                 V_MINTAG(8));
        }
}

/* Figure out how many Queue Sets we can support */
void configure_max_ethqsets(struct adapter *adapter)
{
        unsigned int ethqsets;

        /*
         * We need to reserve an Ingress Queue for the Asynchronous Firmware
         * Event Queue.
         *
         * For each Queue Set, we'll need the ability to allocate two Egress
         * Contexts -- one for the Ingress Queue Free List and one for the TX
         * Ethernet Queue.
         */
        if (is_pf4(adapter)) {
                struct pf_resources *pfres = &adapter->params.pfres;

                ethqsets = pfres->niqflint - 1;
                if (pfres->neq < ethqsets * 2)
                        ethqsets = pfres->neq / 2;
        } else {
                struct vf_resources *vfres = &adapter->params.vfres;

                ethqsets = vfres->niqflint - 1;
                if (vfres->nethctrl != ethqsets)
                        ethqsets = min(vfres->nethctrl, ethqsets);
                if (vfres->neq < ethqsets * 2)
                        ethqsets = vfres->neq / 2;
        }

        if (ethqsets > MAX_ETH_QSETS)
                ethqsets = MAX_ETH_QSETS;
        adapter->sge.max_ethqsets = ethqsets;
}
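
/*
 * Worked example for the PF sizing above (numbers illustrative): with
 * pfres->niqflint = 10 and pfres->neq = 16, reserving one ingress queue
 * for the firmware event queue leaves ethqsets = 9; since each queue set
 * needs two egress contexts and 16 < 9 * 2, the count is capped at
 * ethqsets = 16 / 2 = 8.
 */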

/*
 * Tweak configuration based on system architecture, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  So these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
        u8 rx_dma_offset;

        /*
         * Fix up various Host-Dependent Parameters like Page Size, Cache
         * Line Size, etc.  The firmware default is for a 4KB Page Size and
         * 64B Cache Line Size ...
         */
        t4_fixup_host_params_compat(adapter, CXGBE_PAGE_SIZE, L1_CACHE_BYTES,
                                    T5_LAST_REV);

        /*
         * Keep the chip default offset to deliver Ingress packets into our
         * DMA buffers to zero
         */
        rx_dma_offset = 0;
        t4_set_reg_field(adapter, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT),
                         V_PKTSHIFT(rx_dma_offset));

        t4_set_reg_field(adapter, A_SGE_FLM_CFG,
                         V_CREDITCNT(M_CREDITCNT) | M_CREDITCNTPACKING,
                         V_CREDITCNT(3) | V_CREDITCNTPACKING(1));

        t4_set_reg_field(adapter, A_SGE_INGRESS_RX_THRESHOLD,
                         V_THRESHOLD_3(M_THRESHOLD_3), V_THRESHOLD_3(32U));

        t4_set_reg_field(adapter, A_SGE_CONTROL2, V_IDMAARBROUNDROBIN(1U),
                         V_IDMAARBROUNDROBIN(1U));

        /*
         * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
         * adds the pseudo header itself.
         */
        t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG,
                               F_CSUM_HAS_PSEUDO_HDR, 0);

        return 0;
}

/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
        struct fw_caps_config_cmd caps_cmd;
        unsigned long mtype = 0, maddr = 0;
        u32 finiver, finicsum, cfcsum;
        int ret;
        int config_issued = 0;
        int cfg_addr;
        char config_name[20];

        /*
         * Reset device if necessary.
         */
        if (reset) {
                ret = t4_fw_reset(adapter, adapter->mbox,
                                  F_PIORSTMODE | F_PIORST);
                if (ret < 0) {
                        dev_warn(adapter, "Firmware reset failed, error %d\n",
                                 -ret);
                        goto bye;
                }
        }

        cfg_addr = t4_flash_cfg_addr(adapter);
        if (cfg_addr < 0) {
                ret = cfg_addr;
                dev_warn(adapter, "Finding address for firmware config file in flash failed, error %d\n",
                         -ret);
                goto bye;
        }

        strcpy(config_name, "On Flash");
        mtype = FW_MEMTYPE_CF_FLASH;
        maddr = cfg_addr;

        /*
         * Issue a Capability Configuration command to the firmware to get it
         * to parse the Configuration File.  We don't use t4_fw_config_file()
         * because we want the ability to modify various features after we've
         * processed the configuration file ...
         */
        memset(&caps_cmd, 0, sizeof(caps_cmd));
        caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
                                           F_FW_CMD_REQUEST | F_FW_CMD_READ);
        caps_cmd.cfvalid_to_len16 =
                cpu_to_be32(F_FW_CAPS_CONFIG_CMD_CFVALID |
                            V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
                            V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
                            FW_LEN16(caps_cmd));
        ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
                         &caps_cmd);
        /*
         * If the CAPS_CONFIG failed with an ENOENT (for a Firmware
         * Configuration File in FLASH), our last gasp effort is to use the
         * Firmware Configuration File which is embedded in the firmware.  A
         * very few early versions of the firmware didn't have one embedded
         * but we can ignore those.
         */
        if (ret == -ENOENT) {
                dev_info(adapter, "%s: Going for embedded config in firmware..\n",
                         __func__);

                memset(&caps_cmd, 0, sizeof(caps_cmd));
                caps_cmd.op_to_write =
                        cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
                                    F_FW_CMD_REQUEST | F_FW_CMD_READ);
                caps_cmd.cfvalid_to_len16 = cpu_to_be32(FW_LEN16(caps_cmd));
                ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
                                 sizeof(caps_cmd), &caps_cmd);
                strcpy(config_name, "Firmware Default");
        }

        config_issued = 1;
        if (ret < 0)
                goto bye;

        finiver = be32_to_cpu(caps_cmd.finiver);
        finicsum = be32_to_cpu(caps_cmd.finicsum);
        cfcsum = be32_to_cpu(caps_cmd.cfcsum);
        if (finicsum != cfcsum)
                dev_warn(adapter, "Configuration File checksum mismatch: [fini] csum=%#x, computed csum=%#x\n",
                         finicsum, cfcsum);

        /*
         * If we're a pure NIC driver then disable all offloading facilities.
         * This will allow the firmware to optimize aspects of the hardware
         * configuration which will result in improved performance.
         */
        caps_cmd.niccaps &= cpu_to_be16(~FW_CAPS_CONFIG_NIC_ETHOFLD);
        caps_cmd.toecaps = 0;
        caps_cmd.iscsicaps = 0;
        caps_cmd.rdmacaps = 0;
        caps_cmd.fcoecaps = 0;

        /*
         * And now tell the firmware to use the configuration we just loaded.
         */
        caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
                                           F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
        caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
        ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
                         NULL);
        if (ret < 0) {
                dev_warn(adapter, "Unable to finalize Firmware Capabilities %d\n",
                         -ret);
                goto bye;
        }

        /*
         * Tweak configuration based on system architecture, etc.
         */
        ret = adap_init0_tweaks(adapter);
        if (ret < 0) {
                dev_warn(adapter, "Unable to do init0-tweaks %d\n", -ret);
                goto bye;
        }

        /*
         * And finally tell the firmware to initialize itself using the
         * parameters from the Configuration File.
         */
        ret = t4_fw_initialize(adapter, adapter->mbox);
        if (ret < 0) {
                dev_warn(adapter, "Initializing Firmware failed, error %d\n",
                         -ret);
                goto bye;
        }

        /*
         * Return successfully and note that we're operating with parameters
         * taken from the Configuration File rather than from hard-wired
         * initialization constants buried in the driver.
         */
        dev_info(adapter,
                 "Successfully configured using Firmware Configuration File \"%s\", version %#x, computed checksum %#x\n",
                 config_name, finiver, cfcsum);

        return 0;

        /*
         * Something bad happened.  Return the error ...  (If the "error"
         * is that there's no Configuration File on the adapter we don't
         * want to issue a warning since this is fairly common.)
         */
bye:
        if (config_issued && ret != -ENOENT)
                dev_warn(adapter, "\"%s\" configuration file error %d\n",
                         config_name, -ret);

        dev_debug(adapter, "%s: returning ret = %d ..\n", __func__, ret);
        return ret;
}

static int adap_init0(struct adapter *adap)
{
        struct fw_caps_config_cmd caps_cmd;
        int ret = 0;
        u32 v, port_vec;
        enum dev_state state;
        u32 params[7], val[7];
        int reset = 1;
        int mbox = adap->mbox;

        /*
         * Contact FW, advertising Master capability.
         */
        ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
        if (ret < 0) {
                dev_err(adap, "%s: could not connect to FW, error %d\n",
                        __func__, -ret);
                goto bye;
        }

        CXGBE_DEBUG_MBOX(adap, "%s: adap->mbox = %d; ret = %d\n", __func__,
                         adap->mbox, ret);

        if (ret == mbox)
                adap->flags |= MASTER_PF;

        if (state == DEV_STATE_INIT) {
                /*
                 * Force halt and reset FW because a previous instance may have
                 * exited abnormally without properly shutting down
                 */
                ret = t4_fw_halt(adap, adap->mbox, reset);
                if (ret < 0) {
                        dev_err(adap, "Failed to halt. Exit.\n");
                        goto bye;
                }

                ret = t4_fw_restart(adap, adap->mbox, reset);
                if (ret < 0) {
                        dev_err(adap, "Failed to restart. Exit.\n");
                        goto bye;
                }
                state = (enum dev_state)((unsigned)state & ~DEV_STATE_INIT);
        }

        t4_get_version_info(adap);

        ret = t4_get_core_clock(adap, &adap->params.vpd);
        if (ret < 0) {
                dev_err(adap, "%s: could not get core clock, error %d\n",
                        __func__, -ret);
                goto bye;
        }

        /*
         * If the firmware is initialized already (and we're not forcing a
         * master initialization), note that we're living with existing
         * adapter parameters.  Otherwise, it's time to try initializing the
         * adapter ...
         */
        if (state == DEV_STATE_INIT) {
                dev_info(adap, "Coming up as %s: Adapter already initialized\n",
                         adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
        } else {
                dev_info(adap, "Coming up as MASTER: Initializing adapter\n");

                ret = adap_init0_config(adap, reset);
                if (ret == -ENOENT) {
                        dev_err(adap,
                                "No Configuration File present on adapter. Using hard-wired configuration parameters.\n");
                        goto bye;
                }
        }
        if (ret < 0) {
                dev_err(adap, "could not initialize adapter, error %d\n", -ret);
                goto bye;
        }

        /* Now that we've successfully configured and initialized the adapter
         * (or found it already initialized), we can ask the Firmware what
         * resources it has provisioned for us.
         */
        ret = t4_get_pfres(adap);
        if (ret) {
                dev_err(adap,
                        "Unable to retrieve resource provisioning info\n");
                goto bye;
        }

        /* Find out what ports are available to us. */
        v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
            V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
        if (ret < 0) {
                dev_err(adap, "%s: failure in t4_query_params; error = %d\n",
                        __func__, ret);
                goto bye;
        }

        adap->params.nports = hweight32(port_vec);
        adap->params.portvec = port_vec;

        dev_debug(adap, "%s: adap->params.nports = %u\n", __func__,
                  adap->params.nports);

        /*
         * Give the SGE code a chance to pull in anything that it needs ...
         * Note that this must be called after we retrieve our VPD parameters
         * in order to know how to convert core ticks to seconds, etc.
         */
        ret = t4_sge_init(adap);
        if (ret < 0) {
                dev_err(adap, "t4_sge_init failed with error %d\n",
                        -ret);
                goto bye;
        }

        /*
         * Grab some of our basic fundamental operating parameters.
         */
#define FW_PARAM_DEV(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) |  \
         V_FW_PARAMS_PARAM_Y(0) | \
         V_FW_PARAMS_PARAM_Z(0))

        params[0] = FW_PARAM_PFVF(L2T_START);
        params[1] = FW_PARAM_PFVF(L2T_END);
        params[2] = FW_PARAM_PFVF(FILTER_START);
        params[3] = FW_PARAM_PFVF(FILTER_END);
        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 4, params, val);
        if (ret < 0)
                goto bye;
        adap->l2t_start = val[0];
        adap->l2t_end = val[1];
        adap->tids.ftid_base = val[2];
        adap->tids.nftids = val[3] - val[2] + 1;

        params[0] = FW_PARAM_PFVF(CLIP_START);
        params[1] = FW_PARAM_PFVF(CLIP_END);
        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
        if (ret < 0)
                goto bye;
        adap->clipt_start = val[0];
        adap->clipt_end = val[1];

        /*
         * Get device capabilities so we can determine what resources we need
         * to manage.
         */
        memset(&caps_cmd, 0, sizeof(caps_cmd));
        caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
                                     F_FW_CMD_REQUEST | F_FW_CMD_READ);
        caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
        ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
                         &caps_cmd);
        if (ret < 0)
                goto bye;

        if ((caps_cmd.niccaps & cpu_to_be16(FW_CAPS_CONFIG_NIC_HASHFILTER)) &&
            is_t6(adap->params.chip)) {
                if (init_hash_filter(adap) < 0)
                        goto bye;
        }

        /* query tid-related parameters */
        params[0] = FW_PARAM_DEV(NTID);
        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
                              params, val);
        if (ret < 0)
                goto bye;
        adap->tids.ntids = val[0];
        adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);

        /* If we're running on newer firmware, let it know that we're
         * prepared to deal with encapsulated CPL messages.  Older
         * firmware won't understand this and we'll just get
         * unencapsulated messages ...
         */
        params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
        val[0] = 1;
        (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);

        /*
         * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
         * capability.  Earlier versions of the firmware didn't have the
         * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
         * permission to use ULPTX MEMWRITE DSGL.
         */
        if (is_t4(adap->params.chip)) {
                adap->params.ulptx_memwrite_dsgl = false;
        } else {
                params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
                ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
                                      1, params, val);
                adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
        }

        /*
         * The MTU/MSS Table is initialized by now, so load their values.  If
         * we're initializing the adapter, then we'll make any modifications
         * we want to the MTU/MSS Table and also initialize the congestion
         * parameters.
         */
        t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
        if (state != DEV_STATE_INIT) {
                int i;

                /*
                 * The default MTU Table contains values 1492 and 1500.
                 * However, for TCP, it's better to have two values which are
                 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
                 * This allows us to have a TCP Data Payload which is a
                 * multiple of 8 regardless of what combination of TCP Options
                 * are in use (always a multiple of 4 bytes) which is
                 * important for performance reasons.  For instance, if no
                 * options are in use, then we have a 20-byte IP header and a
                 * 20-byte TCP header.  In this case, a 1500-byte MSS would
                 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
                 * which is not a multiple of 8.  So using an MSS of 1488 in
                 * this case results in a TCP Data Payload of 1448 bytes which
                 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
                 * Stamps have been negotiated, then an MTU of 1500 bytes
                 * results in a TCP Data Payload of 1448 bytes which, as
                 * above, is a multiple of 8 bytes ...
                 */
                for (i = 0; i < NMTUS; i++)
                        if (adap->params.mtus[i] == 1492) {
                                adap->params.mtus[i] = 1488;
                                break;
                        }

                t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
                             adap->params.b_wnd);
        }
        t4_init_sge_params(adap);
        t4_init_tp_params(adap);
        configure_pcie_ext_tag(adap);
        configure_vlan_types(adap);
        configure_max_ethqsets(adap);

        adap->params.drv_memwin = MEMWIN_NIC;
        adap->flags |= FW_OK;
        dev_debug(adap, "%s: returning zero..\n", __func__);
        return 0;

        /*
         * Something bad happened.  If a command timed out or failed with EIO
         * FW does not operate within its spec or something catastrophic
         * happened to HW/FW, stop issuing commands.
         */
bye:
        if (ret != -ETIMEDOUT && ret != -EIO)
                t4_fw_bye(adap, adap->mbox);
        return ret;
}

/**
 * t4_os_portmod_changed - handle port module changes
 * @adap: the adapter associated with the module change
 * @port_id: the port index whose module status has changed
 *
 * This is the OS-dependent handler for port module changes.  It is
 * invoked when a port module is removed or inserted for any OS-specific
 * processing.
 */
void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
        static const char * const mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
        };

        const struct port_info *pi = adap2pinfo(adap, port_id);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                dev_info(adap, "Port%d: port module unplugged\n", pi->port_id);
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                dev_info(adap, "Port%d: %s port module inserted\n", pi->port_id,
                         mod_str[pi->mod_type]);
        else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
                dev_info(adap, "Port%d: unsupported port module inserted\n",
                         pi->port_id);
        else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
                dev_info(adap, "Port%d: unknown port module inserted\n",
                         pi->port_id);
        else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
                dev_info(adap, "Port%d: transceiver module error\n",
                         pi->port_id);
        else
                dev_info(adap, "Port%d: unknown module type %d inserted\n",
                         pi->port_id, pi->mod_type);
}

inline bool force_linkup(struct adapter *adap)
{
        struct rte_pci_device *pdev = adap->pdev;

        if (is_pf4(adap))
                return false;   /* force_linkup not required for pf driver */
        if (!cxgbe_get_devargs(pdev->device.devargs,
                               CXGBE_DEVARG_FORCE_LINK_UP))
                return false;
        return true;
}

/**
 * link_start - enable a port
 * @pi: the port to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
int link_start(struct port_info *pi)
{
        struct adapter *adapter = pi->adapter;
        int ret;
        unsigned int mtu;

        mtu = pi->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
              (ETHER_HDR_LEN + ETHER_CRC_LEN);

        /*
         * We do not set address filters and promiscuity here, the stack does
         * that step explicitly.
         */
        ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1,
                            -1, 1, true);
        if (ret == 0) {
                ret = t4_change_mac(adapter, adapter->mbox, pi->viid,
                                    pi->xact_addr_filt,
                                    (u8 *)&pi->eth_dev->data->mac_addrs[0],
                                    true, true);
                if (ret >= 0) {
                        pi->xact_addr_filt = ret;
                        ret = 0;
                }
        }
        if (ret == 0 && is_pf4(adapter))
                ret = t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0) {
                /*
                 * Enabling a Virtual Interface can result in an interrupt
                 * during the processing of the VI Enable command and, in some
                 * paths, result in an attempt to issue another command in the
                 * interrupt context.  Thus, we disable interrupts during the
                 * course of the VI Enable command ...
                 */
                ret = t4_enable_vi_params(adapter, adapter->mbox, pi->viid,
                                          true, true, false);
        }

        if (ret == 0 && force_linkup(adapter))
                pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
        return ret;
}

/**
 * cxgbe_write_rss_conf - write the RSS hash configuration for a given port
 * @pi: the port
 * @rss_hf: Hash configuration to apply
 */
int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
{
        struct adapter *adapter = pi->adapter;
        const struct sge_eth_rxq *rxq;
        u64 flags = 0;
        u16 rss;
        int err;

        /*  Should never be called before setting up sge eth rx queues */
        if (!(adapter->flags & FULL_INIT_DONE)) {
                dev_err(adapter, "%s: No RXQs available on port %d\n",
                        __func__, pi->port_id);
                return -EINVAL;
        }

        /* Don't allow unsupported hash functions */
        if (rss_hf & ~CXGBE_RSS_HF_ALL)
                return -EINVAL;

        if (rss_hf & CXGBE_RSS_HF_IPV4_MASK)
                flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;

        if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
                flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;

        if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
                flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
                         F_FW_RSS_VI_CONFIG_CMD_UDPEN;

        if (rss_hf & CXGBE_RSS_HF_IPV6_MASK)
                flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;

        if (rss_hf & CXGBE_RSS_HF_TCP_IPV6_MASK)
                flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
                         F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;

        if (rss_hf & CXGBE_RSS_HF_UDP_IPV6_MASK)
                flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
                         F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
                         F_FW_RSS_VI_CONFIG_CMD_UDPEN;

        rxq = &adapter->sge.ethrxq[pi->first_qset];
        rss = rxq[0].rspq.abs_id;

        /* If Tunnel All Lookup isn't specified in the global RSS
         * Configuration, then we need to specify a default Ingress
         * Queue for any ingress packets which aren't hashed.  We'll
         * use our first ingress queue ...
         */
        err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
                               flags, rss);
        return err;
}
1431
1432 /**
1433  * cxgbe_write_rss - write the RSS table for a given port
1434  * @pi: the port
1435  * @queues: array of queue indices for RSS
1436  *
1437  * Sets up the portion of the HW RSS table for the port's VI to distribute
1438  * packets to the Rx queues in @queues.
1439  */
1440 int cxgbe_write_rss(const struct port_info *pi, const u16 *queues)
1441 {
1442         u16 *rss;
1443         int i, err;
1444         struct adapter *adapter = pi->adapter;
1445         const struct sge_eth_rxq *rxq;
1446
1447         /*  Should never be called before setting up sge eth rx queues */
1448         BUG_ON(!(adapter->flags & FULL_INIT_DONE));
1449
1450         rxq = &adapter->sge.ethrxq[pi->first_qset];
1451         rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
1452         if (!rss)
1453                 return -ENOMEM;
1454
1455         /* map the queue indices to queue ids */
1456         for (i = 0; i < pi->rss_size; i++, queues++)
1457                 rss[i] = rxq[*queues].rspq.abs_id;
1458
1459         err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
1460                                   pi->rss_size, rss, pi->rss_size);
1461         rte_free(rss);
1462         return err;
1463 }
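
/*
 * Worked example (editor's note): with pi->first_qset = 4, pi->rss_size = 8
 * and queues[] = { 0, 1, 0, 1, 0, 1, 0, 1 }, the loop above stores the
 * absolute response-queue ids of ethrxq[4] and ethrxq[5] alternately into
 * the VI's slice of the hardware RSS table, so hashed traffic round-robins
 * across those two queues.  The concrete abs_id values depend on the order
 * in which the response queues were allocated.
 */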
1464
1465 /**
1466  * setup_rss - configure RSS
1467  * @pi: the port
1468  *
1469  * Sets up RSS to distribute packets to multiple receive queues.  We
1470  * configure the RSS CPU lookup table to distribute to the number of HW
1471  * receive queues, and the response queue lookup table to narrow that
1472  * down to the response queues actually configured for each port.
1473  * We always configure the RSS mapping for all ports since the mapping
1474  * table has plenty of entries.
1475  */
1476 int setup_rss(struct port_info *pi)
1477 {
1478         int j, err;
1479         struct adapter *adapter = pi->adapter;
1480
1481         dev_debug(adapter, "%s:  pi->rss_size = %u; pi->n_rx_qsets = %u\n",
1482                   __func__, pi->rss_size, pi->n_rx_qsets);
1483
1484         if (!(pi->flags & PORT_RSS_DONE)) {
1485                 if (adapter->flags & FULL_INIT_DONE) {
1486                         /* Fill default values with equal distribution */
1487                         for (j = 0; j < pi->rss_size; j++)
1488                                 pi->rss[j] = j % pi->n_rx_qsets;
1489
1490                         err = cxgbe_write_rss(pi, pi->rss);
1491                         if (err)
1492                                 return err;
1493
1494                         err = cxgbe_write_rss_conf(pi, pi->rss_hf);
1495                         if (err)
1496                                 return err;
1497                         pi->flags |= PORT_RSS_DONE;
1498                 }
1499         }
1500         return 0;
1501 }
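
/*
 * Editor's example of the equal-distribution fill above: with
 * pi->rss_size = 8 and pi->n_rx_qsets = 3, pi->rss[] becomes
 * { 0, 1, 2, 0, 1, 2, 0, 1 }, i.e. Rx queue set indices assigned
 * modulo the number of configured queue sets.
 */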
1502
1503 /*
1504  * Arm interrupt generation and the holdoff timer for an Rx response queue.
1505  */
1506 static void enable_rx(struct adapter *adap, struct sge_rspq *q)
1507 {
1508         /* 0-increment GTS to start the timer and enable interrupts */
1509         t4_write_reg(adap, is_pf4(adap) ? MYPF_REG(A_SGE_PF_GTS) :
1510                                           T4VF_SGE_BASE_ADDR + A_SGE_VF_GTS,
1511                      V_SEINTARM(q->intr_params) |
1512                      V_INGRESSQID(q->cntxt_id));
1513 }
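
/*
 * Editor's note on the GTS write above: V_INGRESSQID() selects the response
 * queue context and V_SEINTARM() re-arms its interrupt/holdoff parameters.
 * Since the write returns no credits (a "0-increment" update), its only
 * effect is to start the holdoff timer and enable interrupt generation for
 * that queue.
 */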
1514
1515 void cxgbe_enable_rx_queues(struct port_info *pi)
1516 {
1517         struct adapter *adap = pi->adapter;
1518         struct sge *s = &adap->sge;
1519         unsigned int i;
1520
1521         for (i = 0; i < pi->n_rx_qsets; i++)
1522                 enable_rx(adap, &s->ethrxq[pi->first_qset + i].rspq);
1523 }
1524
1525 /**
1526  * fw_caps_to_speed_caps - translate Firmware Port Caps to Speed Caps.
1527  * @port_type: Firmware Port Type
1528  * @fw_caps: Firmware Port Capabilities
1529  * @speed_caps: Device Info Speed Capabilities
1530  *
1531  * Translate a Firmware Port Capabilities specification to Device Info
1532  * Speed Capabilities.
1533  */
1534 static void fw_caps_to_speed_caps(enum fw_port_type port_type,
1535                                   unsigned int fw_caps,
1536                                   u32 *speed_caps)
1537 {
1538 #define SET_SPEED(__speed_name) \
1539         do { \
1540                 *speed_caps |= ETH_LINK_ ## __speed_name; \
1541         } while (0)
1542
1543 #define FW_CAPS_TO_SPEED(__fw_name) \
1544         do { \
1545                 if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
1546                         SET_SPEED(__fw_name); \
1547         } while (0)
1548
1549         switch (port_type) {
1550         case FW_PORT_TYPE_BT_SGMII:
1551         case FW_PORT_TYPE_BT_XFI:
1552         case FW_PORT_TYPE_BT_XAUI:
1553                 FW_CAPS_TO_SPEED(SPEED_100M);
1554                 FW_CAPS_TO_SPEED(SPEED_1G);
1555                 FW_CAPS_TO_SPEED(SPEED_10G);
1556                 break;
1557
1558         case FW_PORT_TYPE_KX4:
1559         case FW_PORT_TYPE_KX:
1560         case FW_PORT_TYPE_FIBER_XFI:
1561         case FW_PORT_TYPE_FIBER_XAUI:
1562         case FW_PORT_TYPE_SFP:
1563         case FW_PORT_TYPE_QSFP_10G:
1564         case FW_PORT_TYPE_QSA:
1565                 FW_CAPS_TO_SPEED(SPEED_1G);
1566                 FW_CAPS_TO_SPEED(SPEED_10G);
1567                 break;
1568
1569         case FW_PORT_TYPE_KR:
1570                 SET_SPEED(SPEED_10G);
1571                 break;
1572
1573         case FW_PORT_TYPE_BP_AP:
1574         case FW_PORT_TYPE_BP4_AP:
1575                 SET_SPEED(SPEED_1G);
1576                 SET_SPEED(SPEED_10G);
1577                 break;
1578
1579         case FW_PORT_TYPE_BP40_BA:
1580         case FW_PORT_TYPE_QSFP:
1581                 SET_SPEED(SPEED_40G);
1582                 break;
1583
1584         case FW_PORT_TYPE_CR_QSFP:
1585         case FW_PORT_TYPE_SFP28:
1586         case FW_PORT_TYPE_KR_SFP28:
1587                 FW_CAPS_TO_SPEED(SPEED_1G);
1588                 FW_CAPS_TO_SPEED(SPEED_10G);
1589                 FW_CAPS_TO_SPEED(SPEED_25G);
1590                 break;
1591
1592         case FW_PORT_TYPE_CR2_QSFP:
1593                 SET_SPEED(SPEED_50G);
1594                 break;
1595
1596         case FW_PORT_TYPE_KR4_100G:
1597         case FW_PORT_TYPE_CR4_QSFP:
1598                 FW_CAPS_TO_SPEED(SPEED_25G);
1599                 FW_CAPS_TO_SPEED(SPEED_40G);
1600                 FW_CAPS_TO_SPEED(SPEED_50G);
1601                 FW_CAPS_TO_SPEED(SPEED_100G);
1602                 break;
1603
1604         default:
1605                 break;
1606         }
1607
1608 #undef FW_CAPS_TO_SPEED
1609 #undef SET_SPEED
1610 }
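
/*
 * Editor's note: the helper macros above expand mechanically.  For example,
 * FW_CAPS_TO_SPEED(SPEED_25G) becomes:
 *
 *	if (fw_caps & FW_PORT_CAP32_SPEED_25G)
 *		*speed_caps |= ETH_LINK_SPEED_25G;
 *
 * so the port type selects which speeds are plausible for the transceiver,
 * and the firmware capability bits pick the ones actually supported.
 */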
1611
1612 /**
1613  * cxgbe_get_speed_caps - Fetch supported speed capabilities
1614  * @pi: Underlying port's info
1615  * @speed_caps: Device Info speed capabilities
1616  *
1617  * Fetch supported speed capabilities of the underlying port.
1618  */
1619 void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
1620 {
1621         *speed_caps = 0;
1622
1623         fw_caps_to_speed_caps(pi->port_type, pi->link_cfg.pcaps,
1624                               speed_caps);
1625
1626         if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG))
1627                 *speed_caps |= ETH_LINK_SPEED_FIXED;
1628 }
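
/*
 * Illustrative use (editor's sketch, hypothetical op name): a dev_infos_get
 * callback can report these capabilities to applications directly; note that
 * ETH_LINK_SPEED_FIXED is already OR'ed in for ports without autoneg.
 *
 *	static void example_dev_info_get(struct rte_eth_dev *dev,
 *					 struct rte_eth_dev_info *info)
 *	{
 *		struct port_info *pi = dev->data->dev_private;
 *
 *		cxgbe_get_speed_caps(pi, &info->speed_capa);
 *	}
 */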
1629
1630 /**
1631  * cxgbe_set_link_status - Set device link up or down.
1632  * @pi: Underlying port's info
1633  * @status: 0 - down, 1 - up
1634  *
1635  * Set the device link up or down.
1636  */
1637 int cxgbe_set_link_status(struct port_info *pi, bool status)
1638 {
1639         struct adapter *adapter = pi->adapter;
1640         int err = 0;
1641
1642         err = t4_enable_vi(adapter, adapter->mbox, pi->viid, status, status);
1643         if (err) {
1644                 dev_err(adapter, "%s: t4_enable_vi failed: %d\n", __func__, err);
1645                 return err;
1646         }
1647
1648         if (!status)
1649                 t4_reset_link_config(adapter, pi->pidx);
1650
1651         return 0;
1652 }
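
/*
 * Editor's sketch (hypothetical names): the ethdev link-toggle ops reduce to
 * this helper with status = true or false respectively.
 *
 *	static int example_dev_set_link_up(struct rte_eth_dev *dev)
 *	{
 *		return cxgbe_set_link_status(dev->data->dev_private, true);
 *	}
 *
 *	static int example_dev_set_link_down(struct rte_eth_dev *dev)
 *	{
 *		return cxgbe_set_link_status(dev->data->dev_private, false);
 *	}
 */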
1653
1654 /**
1655  * cxgbe_up - enable the adapter
1656  * @adap: adapter being enabled
1657  *
1658  * Called when the first port is enabled, this function performs the
1659  * actions necessary to make an adapter operational, such as completing
1660  * the initialization of HW modules, and enabling interrupts.
1661  */
1662 int cxgbe_up(struct adapter *adap)
1663 {
1664         enable_rx(adap, &adap->sge.fw_evtq);
1665         t4_sge_tx_monitor_start(adap);
1666         if (is_pf4(adap))
1667                 t4_intr_enable(adap);
1668         adap->flags |= FULL_INIT_DONE;
1669
1670         /* TODO: deadman watchdog ?? */
1671         return 0;
1672 }
1673
1674 /*
1675  * Close the port
1676  */
1677 int cxgbe_down(struct port_info *pi)
1678 {
1679         return cxgbe_set_link_status(pi, false);
1680 }
1681
1682 /*
1683  * Release resources when all the ports have been stopped.
1684  */
1685 void cxgbe_close(struct adapter *adapter)
1686 {
1687         struct port_info *pi;
1688         int i;
1689
1690         if (adapter->flags & FULL_INIT_DONE) {
1691                 tid_free(&adapter->tids);
1692                 t4_cleanup_clip_tbl(adapter);
1693                 t4_cleanup_l2t(adapter);
1694                 if (is_pf4(adapter))
1695                         t4_intr_disable(adapter);
1696                 t4_sge_tx_monitor_stop(adapter);
1697                 t4_free_sge_resources(adapter);
1698                 for_each_port(adapter, i) {
1699                         pi = adap2pinfo(adapter, i);
1700                         if (pi->viid != 0)
1701                                 t4_free_vi(adapter, adapter->mbox,
1702                                            adapter->pf, 0, pi->viid);
1703                         rte_free(pi->eth_dev->data->mac_addrs);
1704                         /* Skip first port since it'll be freed by DPDK stack */
1705                         if (i) {
1706                                 rte_free(pi->eth_dev->data->dev_private);
1707                                 rte_eth_dev_release_port(pi->eth_dev);
1708                         }
1709                 }
1710                 adapter->flags &= ~FULL_INIT_DONE;
1711         }
1712
1713         if (is_pf4(adapter) && (adapter->flags & FW_OK))
1714                 t4_fw_bye(adapter, adapter->mbox);
1715 }
1716
1717 int cxgbe_probe(struct adapter *adapter)
1718 {
1719         struct port_info *pi;
1720         int chip;
1721         int func, i;
1722         int err = 0;
1723         u32 whoami;
1724
1725         whoami = t4_read_reg(adapter, A_PL_WHOAMI);
1726         chip = t4_get_chip_type(adapter,
1727                         CHELSIO_PCI_ID_VER(adapter->pdev->id.device_id));
1728         if (chip < 0)
1729                 return chip;
1730
1731         func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
1732                G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
1733
1734         adapter->mbox = func;
1735         adapter->pf = func;
1736
1737         t4_os_lock_init(&adapter->mbox_lock);
1738         TAILQ_INIT(&adapter->mbox_list);
1739         t4_os_lock_init(&adapter->win0_lock);
1740
1741         err = t4_prep_adapter(adapter);
1742         if (err)
1743                 return err;
1744
1745         setup_memwin(adapter);
1746         err = adap_init0(adapter);
1747         if (err) {
1748                 dev_err(adapter, "%s: Adapter initialization failed, error %d\n",
1749                         __func__, err);
1750                 goto out_free;
1751         }
1752
1753         if (!is_t4(adapter->params.chip)) {
1754                 /*
1755                  * The userspace doorbell BAR is split evenly into doorbell
1756                  * regions, each associated with an egress queue.  If this
1757                  * per-queue region is large enough (at least UDBS_SEG_SIZE)
1758                  * then it can be used to submit a tx work request with an
1759                  * implied doorbell.  Enable write combining on the BAR if
1760                  * there is room for such work requests.
1761                  */
1762                 int s_qpp, qpp, num_seg;
1763
1764                 s_qpp = (S_QUEUESPERPAGEPF0 +
1765                         (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) *
1766                         adapter->pf);
1767                 qpp = 1 << ((t4_read_reg(adapter,
1768                                 A_SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp)
1769                                 & M_QUEUESPERPAGEPF0);
1770                 num_seg = CXGBE_PAGE_SIZE / UDBS_SEG_SIZE;
1771                 if (qpp > num_seg)
1772                         dev_warn(adapter, "Incorrect SGE EGRESS QUEUES_PER_PAGE configuration, continuing in debug mode\n");
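                /*
                 * Editor's worked example, assuming the usual 4 KB BAR2 page
                 * and 128-byte UDBS_SEG_SIZE: num_seg = 4096 / 128 = 32.  If
                 * the QUEUESPERPAGEPF field yields qpp = 32, each egress
                 * queue owns a full 128-byte doorbell segment and implied
                 * doorbells may be used; at qpp = 64 each region is only
                 * 64 bytes, and the warning above fires.
                 */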
1773
1774                 adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr;
1775                 if (!adapter->bar2) {
1776                         dev_err(adapter, "cannot map device bar2 region\n");
1777                         err = -ENOMEM;
1778                         goto out_free;
1779                 }
1780                 t4_write_reg(adapter, A_SGE_STAT_CFG, V_STATSOURCE_T5(7) |
1781                              V_STATMODE(0));
1782         }
1783
1784         for_each_port(adapter, i) {
1785                 const unsigned int numa_node = rte_socket_id();
1786                 char name[RTE_ETH_NAME_MAX_LEN];
1787                 struct rte_eth_dev *eth_dev;
1788
1789                 snprintf(name, sizeof(name), "%s_%d",
1790                          adapter->pdev->device.name, i);
1791
1792                 if (i == 0) {
1793                         /* First port is already allocated by DPDK */
1794                         eth_dev = adapter->eth_dev;
1795                         goto allocate_mac;
1796                 }
1797
1798                 /*
1799                  * now do all data allocation - for eth_dev structure,
1800                  * and internal (private) data for the remaining ports
1801                  */
1802
1803                 /* reserve an ethdev entry */
1804                 eth_dev = rte_eth_dev_allocate(name);
1805                 if (!eth_dev) {
                             err = -ENOMEM;
1806                         goto out_free;
                     }
1807
1808                 eth_dev->data->dev_private =
1809                         rte_zmalloc_socket(name, sizeof(struct port_info),
1810                                            RTE_CACHE_LINE_SIZE, numa_node);
1811                 if (!eth_dev->data->dev_private) {
                             err = -ENOMEM;
1812                         goto out_free;
                     }
1813
1814 allocate_mac:
1815                 pi = (struct port_info *)eth_dev->data->dev_private;
1816                 adapter->port[i] = pi;
1817                 pi->eth_dev = eth_dev;
1818                 pi->adapter = adapter;
1819                 pi->xact_addr_filt = -1;
1820                 pi->port_id = i;
1821                 pi->pidx = i;
1822
1823                 pi->eth_dev->device = &adapter->pdev->device;
1824                 pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
1825                 pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
1826                 pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;
1827
1828                 rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);
1829
1830                 pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
1831                                                            ETHER_ADDR_LEN, 0);
1832                 if (!pi->eth_dev->data->mac_addrs) {
1833                         dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n",
1834                                 __func__);
1835                         err = -ENOMEM;
1836                         goto out_free;
1837                 }
1838
1839                 if (i > 0) {
1840                         /* First port will be notified by upper layer */
1841                         rte_eth_dev_probing_finish(eth_dev);
1842                 }
1843         }
1844
1845         if (adapter->flags & FW_OK) {
1846                 err = t4_port_init(adapter, adapter->mbox, adapter->pf, 0);
1847                 if (err) {
1848                         dev_err(adapter, "%s: t4_port_init failed with err %d\n",
1849                                 __func__, err);
1850                         goto out_free;
1851                 }
1852         }
1853
1854         cfg_queues(adapter->eth_dev);
1855
1856         print_adapter_info(adapter);
1857         print_port_info(adapter);
1858
1859         adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
1860                                           adapter->clipt_end);
1861         if (!adapter->clipt) {
1862                 /* We tolerate a lack of clip_table, giving up some
1863                  * functionality
1864                  */
1865                 dev_warn(adapter, "could not allocate CLIP. Continuing\n");
1866         }
1867
1868         adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
1869         if (!adapter->l2t) {
1870                 /* We tolerate a lack of L2T, giving up some functionality */
1871                 dev_warn(adapter, "could not allocate L2T. Continuing\n");
1872         }
1873
1874         if (tid_init(&adapter->tids) < 0) {
1875                 /* Disable filtering support */
1876                 dev_warn(adapter, "could not allocate TID table, "
1877                          "filter support disabled. Continuing\n");
1878         }
1879
1880         if (is_hashfilter(adapter)) {
1881                 if (t4_read_reg(adapter, A_LE_DB_CONFIG) & F_HASHEN) {
1882                         u32 hash_base, hash_reg;
1883
1884                         hash_reg = A_LE_DB_TID_HASHBASE;
1885                         hash_base = t4_read_reg(adapter, hash_reg);
1886                         adapter->tids.hash_base = hash_base / 4;
1887                 }
1888         } else {
1889                 /* Disable hash filtering support */
1890                 dev_warn(adapter,
1891                          "Maskless filter support disabled. Continuing\n");
1892         }
1893
1894         err = init_rss(adapter);
1895         if (err)
1896                 goto out_free;
1897
1898         return 0;
1899
1900 out_free:
1901         for_each_port(adapter, i) {
1902                 pi = adap2pinfo(adapter, i);
1903                 if (pi->viid != 0)
1904                         t4_free_vi(adapter, adapter->mbox, adapter->pf,
1905                                    0, pi->viid);
1906                 /* Skip first port since it'll be de-allocated by DPDK */
1907                 if (i == 0)
1908                         continue;
1909                 if (pi->eth_dev) {
1910                         if (pi->eth_dev->data->dev_private)
1911                                 rte_free(pi->eth_dev->data->dev_private);
1912                         rte_eth_dev_release_port(pi->eth_dev);
1913                 }
1914         }
1915
1916         if (adapter->flags & FW_OK)
1917                 t4_fw_bye(adapter, adapter->mbox);
1918         return err;
1919 }