drivers/net/cxgbe/cxgbe_main.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2018 Chelsio Communications.
3  * All rights reserved.
4  */
5
6 #include <sys/queue.h>
7 #include <stdio.h>
8 #include <errno.h>
9 #include <stdint.h>
10 #include <string.h>
11 #include <unistd.h>
12 #include <stdarg.h>
13 #include <inttypes.h>
14 #include <netinet/in.h>
15
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
18 #include <rte_cycles.h>
19 #include <rte_interrupts.h>
20 #include <rte_log.h>
21 #include <rte_debug.h>
22 #include <rte_pci.h>
23 #include <rte_atomic.h>
24 #include <rte_branch_prediction.h>
25 #include <rte_memory.h>
26 #include <rte_tailq.h>
27 #include <rte_eal.h>
28 #include <rte_alarm.h>
29 #include <rte_ether.h>
30 #include <ethdev_driver.h>
31 #include <ethdev_pci.h>
32 #include <rte_random.h>
33 #include <rte_dev.h>
34 #include <rte_kvargs.h>
35
36 #include "base/common.h"
37 #include "base/t4_regs.h"
38 #include "base/t4_msg.h"
39 #include "cxgbe.h"
40 #include "cxgbe_pfvf.h"
41 #include "clip_tbl.h"
42 #include "l2t.h"
43 #include "smt.h"
44 #include "mps_tcam.h"
45
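/*
 * Supported combinations of hardware filter-match fields.  Each entry is a
 * set of compressed-filter tuple fields that can be enabled together;
 * cxgbe_get_filter_mode_from_devargs() walks this table to validate the
 * filtermode/filtermask devargs and, when asked for the closest match, to
 * pick the first supported superset of the requested fields.
 */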
46 static const u16 cxgbe_filter_mode_features[] = {
47         (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE |
48          F_PROTOCOL | F_PORT),
49         (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE |
50          F_PROTOCOL | F_FCOE),
51         (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_TOS |
52          F_PORT),
53         (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_TOS |
54          F_FCOE),
55         (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_PORT |
56          F_FCOE),
57         (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_TOS |
58          F_PORT | F_FCOE),
59         (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_VLAN |
60          F_FCOE),
61         (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_VNIC_ID |
62          F_FCOE),
63         (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_TOS | F_VLAN |
64          F_FCOE),
65         (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_TOS | F_VNIC_ID |
66          F_FCOE),
67         (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_VLAN | F_PORT |
68          F_FCOE),
69         (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_VNIC_ID | F_PORT |
70          F_FCOE),
71         (F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_PROTOCOL | F_TOS |
72          F_PORT | F_FCOE),
73         (F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_VLAN | F_PORT),
74         (F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_VLAN | F_FCOE),
75         (F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_VNIC_ID | F_PORT),
76         (F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_VNIC_ID | F_FCOE),
77         (F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VLAN | F_PORT),
78         (F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VLAN | F_FCOE),
79         (F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VNIC_ID |
80          F_PORT),
81         (F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VNIC_ID |
82          F_FCOE),
83         (F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_VLAN | F_PORT |
84          F_FCOE),
85         (F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_VNIC_ID | F_PORT |
86          F_FCOE),
87         (F_FRAGMENTATION | F_MPSHITTYPE | F_TOS | F_VLAN | F_PORT | F_FCOE),
88         (F_FRAGMENTATION | F_MPSHITTYPE | F_TOS | F_VNIC_ID | F_PORT | F_FCOE),
89         (F_FRAGMENTATION | F_MPSHITTYPE | F_VLAN | F_VNIC_ID | F_FCOE),
90         (F_FRAGMENTATION | F_MACMATCH | F_ETHERTYPE | F_PROTOCOL | F_PORT |
91          F_FCOE),
92         (F_FRAGMENTATION | F_MACMATCH | F_ETHERTYPE | F_TOS | F_PORT | F_FCOE),
93         (F_FRAGMENTATION | F_MACMATCH | F_PROTOCOL | F_VLAN | F_PORT | F_FCOE),
94         (F_FRAGMENTATION | F_MACMATCH | F_PROTOCOL | F_VNIC_ID | F_PORT |
95          F_FCOE),
96         (F_FRAGMENTATION | F_MACMATCH | F_TOS | F_VLAN | F_PORT | F_FCOE),
97         (F_FRAGMENTATION | F_MACMATCH | F_TOS | F_VNIC_ID | F_PORT | F_FCOE),
98         (F_FRAGMENTATION | F_ETHERTYPE | F_VLAN | F_PORT | F_FCOE),
99         (F_FRAGMENTATION | F_ETHERTYPE | F_VNIC_ID | F_PORT | F_FCOE),
100         (F_FRAGMENTATION | F_PROTOCOL | F_TOS | F_VLAN | F_FCOE),
101         (F_FRAGMENTATION | F_PROTOCOL | F_TOS | F_VNIC_ID | F_FCOE),
102         (F_FRAGMENTATION | F_VLAN | F_VNIC_ID | F_PORT | F_FCOE),
103         (F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_PROTOCOL | F_PORT |
104          F_FCOE),
105         (F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_TOS | F_PORT | F_FCOE),
106         (F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_VLAN | F_PORT),
107         (F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_VNIC_ID | F_PORT),
108         (F_MPSHITTYPE | F_MACMATCH | F_TOS | F_VLAN | F_PORT),
109         (F_MPSHITTYPE | F_MACMATCH | F_TOS | F_VNIC_ID | F_PORT),
110         (F_MPSHITTYPE | F_ETHERTYPE | F_VLAN | F_PORT | F_FCOE),
111         (F_MPSHITTYPE | F_ETHERTYPE | F_VNIC_ID | F_PORT | F_FCOE),
112         (F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VLAN | F_PORT | F_FCOE),
113         (F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VNIC_ID | F_PORT | F_FCOE),
114         (F_MPSHITTYPE | F_VLAN | F_VNIC_ID | F_PORT),
115 };
116
117 /**
118  * Allocate a chunk of memory. The allocated memory is cleared.
119  */
120 void *t4_alloc_mem(size_t size)
121 {
122         return rte_zmalloc(NULL, size, 0);
123 }
124
125 /**
126  * Free memory allocated through t4_alloc_mem().
127  */
128 void t4_free_mem(void *addr)
129 {
130         rte_free(addr);
131 }
132
133 /*
134  * Response queue handler for the FW event queue.
135  */
136 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
137                           __rte_unused const struct pkt_gl *gl)
138 {
139         u8 opcode = ((const struct rss_header *)rsp)->opcode;
140
141         rsp++;                                          /* skip RSS header */
142
143         /*
144          * FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
145          */
146         if (unlikely(opcode == CPL_FW4_MSG &&
147                      ((const struct cpl_fw4_msg *)rsp)->type ==
148                       FW_TYPE_RSSCPL)) {
149                 rsp++;
150                 opcode = ((const struct rss_header *)rsp)->opcode;
151                 rsp++;
152                 if (opcode != CPL_SGE_EGR_UPDATE) {
153                         dev_err(q->adapter, "unexpected FW4/CPL %#x on FW event queue\n",
154                                 opcode);
155                         goto out;
156                 }
157         }
158
159         if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
160                 /* do nothing */
161         } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
162                 const struct cpl_fw6_msg *msg = (const void *)rsp;
163
164                 t4_handle_fw_rpl(q->adapter, msg->data);
165         } else if (opcode == CPL_ABORT_RPL_RSS) {
166                 const struct cpl_abort_rpl_rss *p = (const void *)rsp;
167
168                 cxgbe_hash_del_filter_rpl(q->adapter, p);
169         } else if (opcode == CPL_SET_TCB_RPL) {
170                 const struct cpl_set_tcb_rpl *p = (const void *)rsp;
171
172                 cxgbe_filter_rpl(q->adapter, p);
173         } else if (opcode == CPL_ACT_OPEN_RPL) {
174                 const struct cpl_act_open_rpl *p = (const void *)rsp;
175
176                 cxgbe_hash_filter_rpl(q->adapter, p);
177         } else if (opcode == CPL_L2T_WRITE_RPL) {
178                 const struct cpl_l2t_write_rpl *p = (const void *)rsp;
179
180                 cxgbe_do_l2t_write_rpl(q->adapter, p);
181         } else if (opcode == CPL_SMT_WRITE_RPL) {
182                 const struct cpl_smt_write_rpl *p = (const void *)rsp;
183
184                 cxgbe_do_smt_write_rpl(q->adapter, p);
185         } else {
186                 dev_err(q->adapter, "unexpected CPL %#x on FW event queue\n",
187                         opcode);
188         }
189 out:
190         return 0;
191 }
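/*
 * The FW event queue thus carries only control-plane traffic: egress queue
 * credit updates, encapsulated FW4/FW6 firmware messages, and the reply
 * CPLs for hash-filter, set-TCB, active-open, L2T and SMT write requests,
 * each dispatched to its handler in fwevtq_handler() above.
 */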
192
193 /**
194  * Setup sge control queues to pass control information.
195  */
196 int cxgbe_setup_sge_ctrl_txq(struct adapter *adapter)
197 {
198         struct sge *s = &adapter->sge;
199         int err = 0, i = 0;
200
201         for_each_port(adapter, i) {
202                 struct port_info *pi = adap2pinfo(adapter, i);
203                 char name[RTE_ETH_NAME_MAX_LEN];
204                 struct sge_ctrl_txq *q = &s->ctrlq[i];
205
206                 q->q.size = 1024;
207                 err = t4_sge_alloc_ctrl_txq(adapter, q,
208                                             adapter->eth_dev,  i,
209                                             s->fw_evtq.cntxt_id,
210                                             rte_socket_id());
211                 if (err) {
212                         dev_err(adapter, "Failed to alloc ctrl txq. Err: %d",
213                                 err);
214                         goto out;
215                 }
216                 snprintf(name, sizeof(name), "%s_ctrl_pool_%d",
217                          pi->eth_dev->device->driver->name,
218                          pi->eth_dev->data->port_id);
219                 q->mb_pool = rte_pktmbuf_pool_create(name, s->ctrlq[i].q.size,
220                                                      RTE_CACHE_LINE_SIZE,
221                                                      RTE_MBUF_PRIV_ALIGN,
222                                                      RTE_MBUF_DEFAULT_BUF_SIZE,
223                                                      SOCKET_ID_ANY);
224                 if (!q->mb_pool) {
225                         err = -rte_errno;
226                         dev_err(adapter,
227                                 "Can't create ctrl pool for port %d. Err: %d\n",
228                                 pi->eth_dev->data->port_id, err);
229                         goto out;
230                 }
231         }
232         return 0;
233 out:
234         t4_free_sge_resources(adapter);
235         return err;
236 }
237
238 /**
239  * cxgbe_poll_for_completion: Poll rxq for completion
240  * @q: rxq to poll
241  * @ms: milliseconds to delay
242  * @cnt: number of times to poll
243  * @c: completion to check for 'done' status
244  *
245  * Polls the rxq for replies until the completion is done or the count
246  * expires.
247  */
248 int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int ms,
249                               unsigned int cnt, struct t4_completion *c)
250 {
251         unsigned int i;
252         unsigned int work_done, budget = 32;
253
254         if (!c)
255                 return -EINVAL;
256
257         for (i = 0; i < cnt; i++) {
258                 cxgbe_poll(q, NULL, budget, &work_done);
259                 t4_os_lock(&c->lock);
260                 if (c->done) {
261                         t4_os_unlock(&c->lock);
262                         return 0;
263                 }
264                 t4_os_unlock(&c->lock);
265                 rte_delay_ms(ms);
266         }
267         return -ETIMEDOUT;
268 }
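/*
 * Illustrative usage sketch (not called from this file): a caller that has
 * posted a control-queue request whose reply handler marks 'c' as done
 * would typically wait for it like this, allowing up to cnt * ms
 * milliseconds overall:
 *
 *	struct t4_completion c;
 *
 *	t4_os_lock_init(&c.lock);
 *	c.done = 0;
 *	// ... post the work request that completes 'c' ...
 *	err = cxgbe_poll_for_completion(&adap->sge.fw_evtq, 2, 10, &c);
 */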
269
270 int cxgbe_setup_sge_fwevtq(struct adapter *adapter)
271 {
272         struct sge *s = &adapter->sge;
273         int err = 0;
274         int msi_idx = 0;
275
276         err = t4_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->eth_dev,
277                                msi_idx, NULL, fwevtq_handler, -1, NULL, 0,
278                                rte_socket_id());
279         return err;
280 }
281
282 static int closest_timer(const struct sge *s, int time)
283 {
284         unsigned int i, match = 0;
285         int delta, min_delta = INT_MAX;
286
287         for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
288                 delta = time - s->timer_val[i];
289                 if (delta < 0)
290                         delta = -delta;
291                 if (delta < min_delta) {
292                         min_delta = delta;
293                         match = i;
294                 }
295         }
296         return match;
297 }
298
299 static int closest_thres(const struct sge *s, int thres)
300 {
301         unsigned int i, match = 0;
302         int delta, min_delta = INT_MAX;
303
304         for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
305                 delta = thres - s->counter_val[i];
306                 if (delta < 0)
307                         delta = -delta;
308                 if (delta < min_delta) {
309                         min_delta = delta;
310                         match = i;
311                 }
312         }
313         return match;
314 }
315
316 /**
317  * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
318  * @q: the Rx queue
319  * @us: the hold-off time in us, or 0 to disable timer
320  * @cnt: the hold-off packet count, or 0 to disable counter
321  *
322  * Sets an Rx queue's interrupt hold-off time and packet count.  At least
323  * one of the two needs to be enabled for the queue to generate interrupts.
324  */
325 int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
326                                unsigned int cnt)
327 {
328         struct adapter *adap = q->adapter;
329         unsigned int timer_val;
330
331         if (cnt) {
332                 int err;
333                 u32 v, new_idx;
334
335                 new_idx = closest_thres(&adap->sge, cnt);
336                 if (q->desc && q->pktcnt_idx != new_idx) {
337                         /* the queue has already been created, update it */
338                         v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
339                             V_FW_PARAMS_PARAM_X(
340                             FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
341                             V_FW_PARAMS_PARAM_YZ(q->cntxt_id);
342                         err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
343                                             &v, &new_idx);
344                         if (err)
345                                 return err;
346                 }
347                 q->pktcnt_idx = new_idx;
348         }
349
350         timer_val = (us == 0) ? X_TIMERREG_RESTART_COUNTER :
351                                 closest_timer(&adap->sge, us);
352
353         if ((us | cnt) == 0)
354                 q->intr_params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX);
355         else
356                 q->intr_params = V_QINTR_TIMER_IDX(timer_val) |
357                                  V_QINTR_CNT_EN(cnt > 0);
358         return 0;
359 }
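/*
 * For example, init_rspq() below uses this to request a 5 us hold-off timer
 * and a 32-packet hold-off counter for each Ethernet Rx response queue, and
 * disables both (pure CIDX updates) for the FW event queue.
 */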
360
361 /**
362  * Allocate an active-open TID and set it to the supplied value.
363  */
364 int cxgbe_alloc_atid(struct tid_info *t, void *data)
365 {
366         int atid = -1;
367
368         t4_os_lock(&t->atid_lock);
369         if (t->afree) {
370                 union aopen_entry *p = t->afree;
371
372                 atid = p - t->atid_tab;
373                 t->afree = p->next;
374                 p->data = data;
375                 t->atids_in_use++;
376         }
377         t4_os_unlock(&t->atid_lock);
378         return atid;
379 }
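/*
 * The active-open TID table doubles as its own free list: each free
 * union aopen_entry's 'next' member points at the next free entry, with
 * t->afree at the head.  cxgbe_alloc_atid() above pops the head and stores
 * the caller's data in the entry; cxgbe_free_atid() below pushes the entry
 * back onto the list.
 */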
380
381 /**
382  * Release an active-open TID.
383  */
384 void cxgbe_free_atid(struct tid_info *t, unsigned int atid)
385 {
386         union aopen_entry *p = &t->atid_tab[atid];
387
388         t4_os_lock(&t->atid_lock);
389         p->next = t->afree;
390         t->afree = p;
391         t->atids_in_use--;
392         t4_os_unlock(&t->atid_lock);
393 }
394
395 /**
396  * Populate a TID_RELEASE WR.  Caller must properly size the skb.
397  * Populate a TID_RELEASE WR.  The caller must properly size the mbuf.
398 static void mk_tid_release(struct rte_mbuf *mbuf, unsigned int tid)
399 {
400         struct cpl_tid_release *req;
401
402         req = rte_pktmbuf_mtod(mbuf, struct cpl_tid_release *);
403         INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
404 }
405
406 /**
407  * Release a TID and inform HW.  If we are unable to allocate the release
408  * message, the hardware notification is skipped.
409  */
410 void cxgbe_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
411                       unsigned short family)
412 {
413         struct rte_mbuf *mbuf;
414         struct adapter *adap = container_of(t, struct adapter, tids);
415
416         WARN_ON(tid >= t->ntids);
417
418         if (t->tid_tab[tid]) {
419                 t->tid_tab[tid] = NULL;
420                 rte_atomic32_dec(&t->conns_in_use);
421                 if (t->hash_base && tid >= t->hash_base) {
422                         if (family == FILTER_TYPE_IPV4)
423                                 rte_atomic32_dec(&t->hash_tids_in_use);
424                 } else {
425                         if (family == FILTER_TYPE_IPV4)
426                                 rte_atomic32_dec(&t->tids_in_use);
427                 }
428         }
429
430         mbuf = rte_pktmbuf_alloc((&adap->sge.ctrlq[chan])->mb_pool);
431         if (mbuf) {
432                 mbuf->data_len = sizeof(struct cpl_tid_release);
433                 mbuf->pkt_len = mbuf->data_len;
434                 mk_tid_release(mbuf, tid);
435                 t4_mgmt_tx(&adap->sge.ctrlq[chan], mbuf);
436         }
437 }
438
439 /**
440  * Insert a TID.
441  */
442 void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
443                       unsigned short family)
444 {
445         t->tid_tab[tid] = data;
446         if (t->hash_base && tid >= t->hash_base) {
447                 if (family == FILTER_TYPE_IPV4)
448                         rte_atomic32_inc(&t->hash_tids_in_use);
449         } else {
450                 if (family == FILTER_TYPE_IPV4)
451                         rte_atomic32_inc(&t->tids_in_use);
452         }
453
454         rte_atomic32_inc(&t->conns_in_use);
455 }
456
457 /**
458  * Free TID tables.
459  */
460 static void tid_free(struct tid_info *t)
461 {
462         if (t->tid_tab) {
463                 if (t->ftid_bmap)
464                         rte_bitmap_free(t->ftid_bmap);
465
466                 if (t->ftid_bmap_array)
467                         t4_os_free(t->ftid_bmap_array);
468
469                 t4_os_free(t->tid_tab);
470         }
471
472         memset(t, 0, sizeof(struct tid_info));
473 }
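/*
 * tid_init() below lays out the TID, active-open TID and filter TID tables
 * as consecutive regions of a single t4_os_alloc() block, and keeps the
 * filter-TID rte_bitmap in a separately allocated array, so tid_free()
 * above only has to release the bitmap, the bitmap array and the combined
 * table block.
 */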
474
475 /**
476  * Allocate and initialize the TID tables.  Returns 0 on success.
477  */
478 static int tid_init(struct tid_info *t)
479 {
480         size_t size;
481         unsigned int ftid_bmap_size;
482         unsigned int natids = t->natids;
483         unsigned int max_ftids = t->nftids;
484
485         ftid_bmap_size = rte_bitmap_get_memory_footprint(t->nftids);
486         size = t->ntids * sizeof(*t->tid_tab) +
487                 max_ftids * sizeof(*t->ftid_tab) +
488                 natids * sizeof(*t->atid_tab);
489
490         t->tid_tab = t4_os_alloc(size);
491         if (!t->tid_tab)
492                 return -ENOMEM;
493
494         t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
495         t->ftid_tab = (struct filter_entry *)&t->atid_tab[t->natids];
496         t->ftid_bmap_array = t4_os_alloc(ftid_bmap_size);
497         if (!t->ftid_bmap_array) {
498                 tid_free(t);
499                 return -ENOMEM;
500         }
501
502         t4_os_lock_init(&t->atid_lock);
503         t4_os_lock_init(&t->ftid_lock);
504
505         t->afree = NULL;
506         t->atids_in_use = 0;
507         rte_atomic32_init(&t->tids_in_use);
508         rte_atomic32_set(&t->tids_in_use, 0);
509         rte_atomic32_init(&t->conns_in_use);
510         rte_atomic32_set(&t->conns_in_use, 0);
511
512         /* Set up the free list for atid_tab. */
513         if (natids) {
514                 while (--natids)
515                         t->atid_tab[natids - 1].next = &t->atid_tab[natids];
516                 t->afree = t->atid_tab;
517         }
518
519         t->ftid_bmap = rte_bitmap_init(t->nftids, t->ftid_bmap_array,
520                                        ftid_bmap_size);
521         if (!t->ftid_bmap) {
522                 tid_free(t);
523                 return -ENOMEM;
524         }
525
526         return 0;
527 }
528
529 static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
530                       unsigned int us, unsigned int cnt,
531                       unsigned int size, unsigned int iqe_size)
532 {
533         q->adapter = adap;
534         cxgb4_set_rspq_intr_params(q, us, cnt);
535         q->iqe_len = iqe_size;
536         q->size = size;
537 }
538
539 int cxgbe_cfg_queue_count(struct rte_eth_dev *eth_dev)
540 {
541         struct port_info *temp_pi, *pi = eth_dev->data->dev_private;
542         struct adapter *adap = pi->adapter;
543         u16 first_txq = 0, first_rxq = 0;
544         struct sge *s = &adap->sge;
545         u16 i, max_rxqs, max_txqs;
546
547         max_rxqs = s->max_ethqsets;
548         max_txqs = s->max_ethqsets;
549         for_each_port(adap, i) {
550                 temp_pi = adap2pinfo(adap, i);
551                 if (i == pi->port_id)
552                         break;
553
554                 if (max_rxqs <= temp_pi->n_rx_qsets ||
555                     max_txqs <= temp_pi->n_tx_qsets)
556                         return -ENOMEM;
557
558                 first_rxq += temp_pi->n_rx_qsets;
559                 first_txq += temp_pi->n_tx_qsets;
560                 max_rxqs -= temp_pi->n_rx_qsets;
561                 max_txqs -= temp_pi->n_tx_qsets;
562         }
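	/*
	 * first_rxq/first_txq now hold the number of queue sets claimed by
	 * the ports probed before this one, and max_rxqs/max_txqs hold what
	 * is left of sge.max_ethqsets.  Illustrative numbers only: with
	 * max_ethqsets = 64 and two earlier ports using 8 Rx and 8 Tx queue
	 * sets each, this port starts at queue set 16 and may configure at
	 * most 48.
	 */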
563
564         if ((eth_dev->data->nb_rx_queues < 1) ||
565             (eth_dev->data->nb_tx_queues < 1))
566                 return -EINVAL;
567
568         if (eth_dev->data->nb_rx_queues > max_rxqs ||
569             eth_dev->data->nb_tx_queues > max_txqs)
570                 return -EINVAL;
571
572         /* We must reconfigure RSS, since the config has changed */
573         pi->flags &= ~PORT_RSS_DONE;
574
575         pi->n_rx_qsets = eth_dev->data->nb_rx_queues;
576         pi->n_tx_qsets = eth_dev->data->nb_tx_queues;
577         pi->first_rxqset = first_rxq;
578         pi->first_txqset = first_txq;
579
580         return 0;
581 }
582
583 void cxgbe_cfg_queues_free(struct adapter *adap)
584 {
585         if (adap->sge.ethtxq) {
586                 rte_free(adap->sge.ethtxq);
587                 adap->sge.ethtxq = NULL;
588         }
589
590         if (adap->sge.ethrxq) {
591                 rte_free(adap->sge.ethrxq);
592                 adap->sge.ethrxq = NULL;
593         }
594
595         adap->flags &= ~CFG_QUEUES;
596 }
597
598 int cxgbe_cfg_queues(struct rte_eth_dev *eth_dev)
599 {
600         struct port_info *pi = eth_dev->data->dev_private;
601         struct adapter *adap = pi->adapter;
602         struct sge *s = &adap->sge;
603         u16 i;
604
605         if (!(adap->flags & CFG_QUEUES)) {
606                 s->ethrxq = rte_calloc_socket(NULL, s->max_ethqsets,
607                                               sizeof(struct sge_eth_rxq), 0,
608                                               rte_socket_id());
609                 if (!s->ethrxq)
610                         return -ENOMEM;
611
612                 s->ethtxq = rte_calloc_socket(NULL, s->max_ethqsets,
613                                               sizeof(struct sge_eth_txq), 0,
614                                               rte_socket_id());
615                 if (!s->ethtxq) {
616                         rte_free(s->ethrxq);
617                         s->ethrxq = NULL;
618                         return -ENOMEM;
619                 }
620
621                 for (i = 0; i < s->max_ethqsets; i++) {
622                         struct sge_eth_rxq *r = &s->ethrxq[i];
623                         struct sge_eth_txq *t = &s->ethtxq[i];
624
625                         init_rspq(adap, &r->rspq, 5, 32, 1024, 64);
626                         r->usembufs = 1;
627                         r->fl.size = (r->usembufs ? 1024 : 72);
628
629                         t->q.size = 1024;
630                 }
631
632                 init_rspq(adap, &adap->sge.fw_evtq, 0, 0, 1024, 64);
633                 adap->flags |= CFG_QUEUES;
634         }
635
636         return 0;
637 }
638
639 void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats)
640 {
641         t4_get_port_stats_offset(pi->adapter, pi->tx_chan, stats,
642                                  &pi->stats_base);
643 }
644
645 void cxgbe_stats_reset(struct port_info *pi)
646 {
647         t4_clr_port_stats(pi->adapter, pi->tx_chan);
648 }
649
650 static void setup_memwin(struct adapter *adap)
651 {
652         u32 mem_win0_base;
653
654         /* For T5, only relative offset inside the PCIe BAR is passed */
655         mem_win0_base = MEMWIN0_BASE;
656
657         /*
658          * Set up memory window for accessing adapter memory ranges.  (Read
659          * back MA register to ensure that changes propagate before we attempt
660          * to use the new values.)
661          */
662         t4_write_reg(adap,
663                      PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
664                                          MEMWIN_NIC),
665                      mem_win0_base | V_BIR(0) |
666                      V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT));
667         t4_read_reg(adap,
668                     PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
669                                         MEMWIN_NIC));
670 }
671
672 int cxgbe_init_rss(struct adapter *adap)
673 {
674         unsigned int i;
675
676         if (is_pf4(adap)) {
677                 int err;
678
679                 err = t4_init_rss_mode(adap, adap->mbox);
680                 if (err)
681                         return err;
682         }
683
684         for_each_port(adap, i) {
685                 struct port_info *pi = adap2pinfo(adap, i);
686
687                 pi->rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
688                 if (!pi->rss)
689                         return -ENOMEM;
690
691                 pi->rss_hf = CXGBE_RSS_HF_ALL;
692         }
693         return 0;
694 }
695
696 /**
697  * Dump basic information about the adapter.
698  */
699 void cxgbe_print_adapter_info(struct adapter *adap)
700 {
701         /**
702          * Hardware/Firmware/etc. Version/Revision IDs.
703          */
704         t4_dump_version_info(adap);
705 }
706
707 void cxgbe_print_port_info(struct adapter *adap)
708 {
709         int i;
710         char buf[80];
711         struct rte_pci_addr *loc = &adap->pdev->addr;
712
713         for_each_port(adap, i) {
714                 const struct port_info *pi = adap2pinfo(adap, i);
715                 char *bufp = buf;
716
717                 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
718                         bufp += sprintf(bufp, "100M/");
719                 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
720                         bufp += sprintf(bufp, "1G/");
721                 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
722                         bufp += sprintf(bufp, "10G/");
723                 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
724                         bufp += sprintf(bufp, "25G/");
725                 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
726                         bufp += sprintf(bufp, "40G/");
727                 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
728                         bufp += sprintf(bufp, "50G/");
729                 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
730                         bufp += sprintf(bufp, "100G/");
731                 if (bufp != buf)
732                         --bufp;
733                 sprintf(bufp, "BASE-%s",
734                         t4_get_port_type_description(
735                                 (enum fw_port_type)pi->link_cfg.port_type));
736
737                 dev_info(adap,
738                          " " PCI_PRI_FMT " Chelsio rev %d %s %s\n",
739                          loc->domain, loc->bus, loc->devid, loc->function,
740                          CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
741                          (adap->flags & USING_MSIX) ? " MSI-X" :
742                          (adap->flags & USING_MSI) ? " MSI" : "");
743         }
744 }
745
746 static int check_devargs_handler(const char *key, const char *value, void *p)
747 {
748         if (!strncmp(key, CXGBE_DEVARG_CMN_KEEP_OVLAN, strlen(key)) ||
749             !strncmp(key, CXGBE_DEVARG_CMN_TX_MODE_LATENCY, strlen(key)) ||
750             !strncmp(key, CXGBE_DEVARG_VF_FORCE_LINK_UP, strlen(key))) {
751                 if (!strncmp(value, "1", 1)) {
752                         bool *dst_val = (bool *)p;
753
754                         *dst_val = true;
755                 }
756         }
757
758         if (!strncmp(key, CXGBE_DEVARG_PF_FILTER_MODE, strlen(key)) ||
759             !strncmp(key, CXGBE_DEVARG_PF_FILTER_MASK, strlen(key))) {
760                 u32 *dst_val = (u32 *)p;
761                 char *endptr = NULL;
762                 u32 arg_val;
763
                    errno = 0;
764                 arg_val = strtoul(value, &endptr, 16);
765                 if (errno || endptr == value)
766                         return -EINVAL;
767
768                 *dst_val = arg_val;
769         }
770
771         return 0;
772 }
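/*
 * check_devargs_handler() above interprets the boolean devargs
 * (CXGBE_DEVARG_CMN_KEEP_OVLAN, CXGBE_DEVARG_CMN_TX_MODE_LATENCY and
 * CXGBE_DEVARG_VF_FORCE_LINK_UP) as enabled when the value "1" is passed,
 * while CXGBE_DEVARG_PF_FILTER_MODE and CXGBE_DEVARG_PF_FILTER_MASK take
 * hexadecimal values parsed with strtoul(..., 16).
 */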
773
774 static int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key,
775                              void *p)
776 {
777         struct rte_kvargs *kvlist;
778         int ret = 0;
779
780         if (!devargs)
781                 return 0;
782
783         kvlist = rte_kvargs_parse(devargs->args, NULL);
784         if (!kvlist)
785                 return 0;
786
787         if (!rte_kvargs_count(kvlist, key))
788                 goto out;
789
790         ret = rte_kvargs_process(kvlist, key, check_devargs_handler, p);
791
792 out:
793         rte_kvargs_free(kvlist);
794
795         return ret;
796 }
797
798 static void cxgbe_get_devargs_int(struct adapter *adap, bool *dst,
799                                   const char *key, bool default_value)
800 {
801         struct rte_pci_device *pdev = adap->pdev;
802         int ret;
803         bool devarg_value = default_value;
804
805         *dst = default_value;
806         if (!pdev)
807                 return;
808
809         ret = cxgbe_get_devargs(pdev->device.devargs, key, &devarg_value);
810         if (ret)
811                 return;
812
813         *dst = devarg_value;
814 }
815
816 static void cxgbe_get_devargs_u32(struct adapter *adap, u32 *dst,
817                                   const char *key, u32 default_value)
818 {
819         struct rte_pci_device *pdev = adap->pdev;
820         u32 devarg_value = default_value;
821         int ret;
822
823         *dst = default_value;
824         if (!pdev)
825                 return;
826
827         ret = cxgbe_get_devargs(pdev->device.devargs, key, &devarg_value);
828         if (ret)
829                 return;
830
831         *dst = devarg_value;
832 }
833
834 void cxgbe_process_devargs(struct adapter *adap)
835 {
836         cxgbe_get_devargs_int(adap, &adap->devargs.keep_ovlan,
837                               CXGBE_DEVARG_CMN_KEEP_OVLAN, false);
838         cxgbe_get_devargs_int(adap, &adap->devargs.tx_mode_latency,
839                               CXGBE_DEVARG_CMN_TX_MODE_LATENCY, false);
840         cxgbe_get_devargs_int(adap, &adap->devargs.force_link_up,
841                               CXGBE_DEVARG_VF_FORCE_LINK_UP, false);
842         cxgbe_get_devargs_u32(adap, &adap->devargs.filtermode,
843                               CXGBE_DEVARG_PF_FILTER_MODE, 0);
844         cxgbe_get_devargs_u32(adap, &adap->devargs.filtermask,
845                               CXGBE_DEVARG_PF_FILTER_MASK, 0);
846 }
847
848 static void configure_vlan_types(struct adapter *adapter)
849 {
850         int i;
851
852         for_each_port(adapter, i) {
853                 /* OVLAN Type 0x88a8 */
854                 t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN0),
855                                  V_OVLAN_MASK(M_OVLAN_MASK) |
856                                  V_OVLAN_ETYPE(M_OVLAN_ETYPE),
857                                  V_OVLAN_MASK(M_OVLAN_MASK) |
858                                  V_OVLAN_ETYPE(0x88a8));
859                 /* OVLAN Type 0x9100 */
860                 t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN1),
861                                  V_OVLAN_MASK(M_OVLAN_MASK) |
862                                  V_OVLAN_ETYPE(M_OVLAN_ETYPE),
863                                  V_OVLAN_MASK(M_OVLAN_MASK) |
864                                  V_OVLAN_ETYPE(0x9100));
865
866                 /* IVLAN 0X8100 */
867                 t4_set_reg_field(adapter, MPS_PORT_RX_IVLAN(i),
868                                  V_IVLAN_ETYPE(M_IVLAN_ETYPE),
869                                  V_IVLAN_ETYPE(0x8100));
870
871                 t4_set_reg_field(adapter, MPS_PORT_RX_CTL(i),
872                                  F_OVLAN_EN0 | F_OVLAN_EN1 |
873                                  F_IVLAN_EN,
874                                  F_OVLAN_EN0 | F_OVLAN_EN1 |
875                                  F_IVLAN_EN);
876         }
877
878         t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG, V_RM_OVLAN(1),
879                                V_RM_OVLAN(!adapter->devargs.keep_ovlan));
880 }
881
882 static int cxgbe_get_filter_vnic_mode_from_devargs(u32 val)
883 {
884         u32 vnic_mode;
885
886         vnic_mode = val & (CXGBE_DEVARGS_FILTER_MODE_PF_VF |
887                            CXGBE_DEVARGS_FILTER_MODE_VLAN_OUTER);
888         if (vnic_mode) {
889                 switch (vnic_mode) {
890                 case CXGBE_DEVARGS_FILTER_MODE_VLAN_OUTER:
891                         return CXGBE_FILTER_VNIC_MODE_OVLAN;
892                 case CXGBE_DEVARGS_FILTER_MODE_PF_VF:
893                         return CXGBE_FILTER_VNIC_MODE_PFVF;
894                 default:
895                         return -EINVAL;
896                 }
897         }
898
899         return CXGBE_FILTER_VNIC_MODE_NONE;
900 }
901
902 static int cxgbe_get_filter_mode_from_devargs(u32 val, bool closest_match)
903 {
904         int vnic_mode, fmode = 0;
905         bool found = false;
906         u8 i;
907
908         if (val >= CXGBE_DEVARGS_FILTER_MODE_MAX) {
909                 pr_err("Unsupported flags set in filter mode. Must be < 0x%x\n",
910                        CXGBE_DEVARGS_FILTER_MODE_MAX);
911                 return -ERANGE;
912         }
913
914         vnic_mode = cxgbe_get_filter_vnic_mode_from_devargs(val);
915         if (vnic_mode < 0) {
916                 pr_err("Unsupported Vnic-mode, more than 1 Vnic-mode selected\n");
917                 return vnic_mode;
918         }
919
920         if (vnic_mode)
921                 fmode |= F_VNIC_ID;
922         if (val & CXGBE_DEVARGS_FILTER_MODE_PHYSICAL_PORT)
923                 fmode |= F_PORT;
924         if (val & CXGBE_DEVARGS_FILTER_MODE_ETHERNET_DSTMAC)
925                 fmode |= F_MACMATCH;
926         if (val & CXGBE_DEVARGS_FILTER_MODE_ETHERNET_ETHTYPE)
927                 fmode |= F_ETHERTYPE;
928         if (val & CXGBE_DEVARGS_FILTER_MODE_VLAN_INNER)
929                 fmode |= F_VLAN;
930         if (val & CXGBE_DEVARGS_FILTER_MODE_IP_TOS)
931                 fmode |= F_TOS;
932         if (val & CXGBE_DEVARGS_FILTER_MODE_IP_PROTOCOL)
933                 fmode |= F_PROTOCOL;
934
935         for (i = 0; i < ARRAY_SIZE(cxgbe_filter_mode_features); i++) {
936                 if ((cxgbe_filter_mode_features[i] & fmode) == fmode) {
937                         found = true;
938                         break;
939                 }
940         }
941
942         if (!found)
943                 return -EINVAL;
944
945         return closest_match ? cxgbe_filter_mode_features[i] : fmode;
946 }
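/*
 * With closest_match set, the caller receives the first supported feature
 * combination that is a superset of the requested fields (used for the
 * filtermode devarg in configure_filter_mode_mask() below); with it
 * cleared, the exact requested field set is returned (used for the
 * filtermask devarg, which must be a subset of filtermode).
 */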
947
948 static int configure_filter_mode_mask(struct adapter *adap)
949 {
950         u32 params[2], val[2], nparams = 0;
951         int ret;
952
953         if (!adap->devargs.filtermode && !adap->devargs.filtermask)
954                 return 0;
955
956         if (!adap->devargs.filtermode || !adap->devargs.filtermask) {
957                 pr_err("Unsupported, Provide both filtermode and filtermask devargs\n");
958                 return -EINVAL;
959         }
960
961         if (adap->devargs.filtermask & ~adap->devargs.filtermode) {
962                 pr_err("Unsupported, filtermask (0x%x) must be subset of filtermode (0x%x)\n",
963                        adap->devargs.filtermask, adap->devargs.filtermode);
964
965                 return -EINVAL;
966         }
967
968         params[0] = CXGBE_FW_PARAM_DEV(FILTER) |
969                     V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK);
970
971         ret = cxgbe_get_filter_mode_from_devargs(adap->devargs.filtermode,
972                                                  true);
973         if (ret < 0) {
974                 pr_err("Unsupported filtermode devargs combination:0x%x\n",
975                        adap->devargs.filtermode);
976                 return ret;
977         }
978
979         val[0] = V_FW_PARAMS_PARAM_FILTER_MODE(ret);
980
981         ret = cxgbe_get_filter_mode_from_devargs(adap->devargs.filtermask,
982                                                  false);
983         if (ret < 0) {
984                 pr_err("Unsupported filtermask devargs combination:0x%x\n",
985                        adap->devargs.filtermask);
986                 return ret;
987         }
988
989         val[0] |= V_FW_PARAMS_PARAM_FILTER_MASK(ret);
990
991         nparams++;
992
993         ret = cxgbe_get_filter_vnic_mode_from_devargs(adap->devargs.filtermode);
994         if (ret < 0)
995                 return ret;
996
997         if (ret) {
998                 params[1] = CXGBE_FW_PARAM_DEV(FILTER) |
999                             V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_VNIC_MODE);
1000
1001                 val[1] = ret - 1;
1002
1003                 nparams++;
1004         }
1005
1006         return t4_set_params(adap, adap->mbox, adap->pf, 0, nparams,
1007                              params, val);
1008 }
1009
1010 static void configure_pcie_ext_tag(struct adapter *adapter)
1011 {
1012         u16 v;
1013         int pos = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
1014
1015         if (!pos)
1016                 return;
1017
1018         if (pos > 0) {
1019                 t4_os_pci_read_cfg2(adapter, pos + PCI_EXP_DEVCTL, &v);
1020                 v |= PCI_EXP_DEVCTL_EXT_TAG;
1021                 t4_os_pci_write_cfg2(adapter, pos + PCI_EXP_DEVCTL, v);
1022                 if (is_t6(adapter->params.chip)) {
1023                         t4_set_reg_field(adapter, A_PCIE_CFG2,
1024                                          V_T6_TOTMAXTAG(M_T6_TOTMAXTAG),
1025                                          V_T6_TOTMAXTAG(7));
1026                         t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
1027                                          V_T6_MINTAG(M_T6_MINTAG),
1028                                          V_T6_MINTAG(8));
1029                 } else {
1030                         t4_set_reg_field(adapter, A_PCIE_CFG2,
1031                                          V_TOTMAXTAG(M_TOTMAXTAG),
1032                                          V_TOTMAXTAG(3));
1033                         t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
1034                                          V_MINTAG(M_MINTAG),
1035                                          V_MINTAG(8));
1036                 }
1037         }
1038 }
1039
1040 /* Figure out how many Queue Sets we can support */
1041 void cxgbe_configure_max_ethqsets(struct adapter *adapter)
1042 {
1043         unsigned int ethqsets, reserved;
1044
1045         /* We need to reserve an Ingress Queue for the Asynchronous Firmware
1046          * Event Queue and 1 Control Queue per port.
1047          *
1048          * For each Queue Set, we'll need the ability to allocate two Egress
1049          * Contexts -- one for the Ingress Queue Free List and one for the TX
1050          * Ethernet Queue.
1051          */
1052         reserved = max(adapter->params.nports, 1);
1053         if (is_pf4(adapter)) {
1054                 struct pf_resources *pfres = &adapter->params.pfres;
1055
1056                 ethqsets = min(pfres->niqflint, pfres->nethctrl);
1057                 if (ethqsets > (pfres->neq / 2))
1058                         ethqsets = pfres->neq / 2;
1059         } else {
1060                 struct vf_resources *vfres = &adapter->params.vfres;
1061
1062                 ethqsets = min(vfres->niqflint, vfres->nethctrl);
1063                 if (ethqsets > (vfres->neq / 2))
1064                         ethqsets = vfres->neq / 2;
1065         }
1066
1067         ethqsets -= reserved;
1068         adapter->sge.max_ethqsets = ethqsets;
1069 }
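/*
 * Worked example (illustrative numbers only): a 2-port PF provisioned with
 * niqflint = 66, nethctrl = 66 and neq = 132 gets min(66, 66) = 66 queue
 * sets, is not capped by neq / 2 = 66, and after subtracting the reserved
 * entries (max(nports, 1) = 2) ends up with 64 usable Ethernet queue sets.
 */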
1070
1071 /*
1072  * Tweak configuration based on system architecture, etc.  Most of these have
1073  * defaults assigned to them by Firmware Configuration Files (if we're using
1074  * them) but need to be explicitly set if we're using hard-coded
1075  * initialization. So these are essentially common tweaks/settings for
1076  * Configuration Files and hard-coded initialization ...
1077  */
1078 static int adap_init0_tweaks(struct adapter *adapter)
1079 {
1080         u8 rx_dma_offset;
1081
1082         /*
1083          * Fix up various Host-Dependent Parameters like Page Size, Cache
1084          * Line Size, etc.  The firmware default is for a 4KB Page Size and
1085          * 64B Cache Line Size ...
1086          */
1087         t4_fixup_host_params_compat(adapter, CXGBE_PAGE_SIZE, L1_CACHE_BYTES,
1088                                     T5_LAST_REV);
1089
1090         /*
1091          * Keep the chip default offset to deliver Ingress packets into our
1092          * DMA buffers to zero
1093          */
1094         rx_dma_offset = 0;
1095         t4_set_reg_field(adapter, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT),
1096                          V_PKTSHIFT(rx_dma_offset));
1097
1098         t4_set_reg_field(adapter, A_SGE_FLM_CFG,
1099                          V_CREDITCNT(M_CREDITCNT) | M_CREDITCNTPACKING,
1100                          V_CREDITCNT(3) | V_CREDITCNTPACKING(1));
1101
1102         t4_set_reg_field(adapter, A_SGE_INGRESS_RX_THRESHOLD,
1103                          V_THRESHOLD_3(M_THRESHOLD_3), V_THRESHOLD_3(32U));
1104
1105         t4_set_reg_field(adapter, A_SGE_CONTROL2, V_IDMAARBROUNDROBIN(1U),
1106                          V_IDMAARBROUNDROBIN(1U));
1107
1108         /*
1109          * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
1110          * adds the pseudo header itself.
1111          */
1112         t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG,
1113                                F_CSUM_HAS_PSEUDO_HDR, 0);
1114
1115         return 0;
1116 }
1117
1118 /*
1119  * Attempt to initialize the adapter via a Firmware Configuration File.
1120  */
1121 static int adap_init0_config(struct adapter *adapter, int reset)
1122 {
1123         u32 finiver, finicsum, cfcsum, param, val;
1124         struct fw_caps_config_cmd caps_cmd;
1125         unsigned long mtype = 0, maddr = 0;
1126         u8 config_issued = 0;
1127         char config_name[20];
1128         int cfg_addr, ret;
1129
1130         /*
1131          * Reset device if necessary.
1132          */
1133         if (reset) {
1134                 ret = t4_fw_reset(adapter, adapter->mbox,
1135                                   F_PIORSTMODE | F_PIORST);
1136                 if (ret < 0) {
1137                         dev_warn(adapter, "Firmware reset failed, error %d\n",
1138                                  -ret);
1139                         goto bye;
1140                 }
1141         }
1142
1143         cfg_addr = t4_flash_cfg_addr(adapter);
1144         if (cfg_addr < 0) {
1145                 ret = cfg_addr;
1146                 dev_warn(adapter, "Finding address for firmware config file in flash failed, error %d\n",
1147                          -ret);
1148                 goto bye;
1149         }
1150
1151         strcpy(config_name, "On Flash");
1152         mtype = FW_MEMTYPE_CF_FLASH;
1153         maddr = cfg_addr;
1154
1155         /* Enable HASH filter region when support is available. */
1156         val = 1;
1157         param = CXGBE_FW_PARAM_DEV(HASHFILTER_WITH_OFLD);
1158         t4_set_params(adapter, adapter->mbox, adapter->pf, 0, 1,
1159                       &param, &val);
1160
1161         /*
1162          * Issue a Capability Configuration command to the firmware to get it
1163          * to parse the Configuration File.  We don't use t4_fw_config_file()
1164          * because we want the ability to modify various features after we've
1165          * processed the configuration file ...
1166          */
1167         memset(&caps_cmd, 0, sizeof(caps_cmd));
1168         caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1169                                            F_FW_CMD_REQUEST | F_FW_CMD_READ);
1170         caps_cmd.cfvalid_to_len16 =
1171                 cpu_to_be32(F_FW_CAPS_CONFIG_CMD_CFVALID |
1172                             V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
1173                             V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
1174                             FW_LEN16(caps_cmd));
1175         ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
1176                          &caps_cmd);
1177         /*
1178          * If the CAPS_CONFIG failed with an ENOENT (for a Firmware
1179          * Configuration File in FLASH), our last gasp effort is to use the
1180          * Firmware Configuration File which is embedded in the firmware.  A
1181          * very few early versions of the firmware didn't have one embedded
1182          * but we can ignore those.
1183          */
1184         if (ret == -ENOENT) {
1185                 dev_info(adapter, "%s: Going for embedded config in firmware..\n",
1186                          __func__);
1187
1188                 memset(&caps_cmd, 0, sizeof(caps_cmd));
1189                 caps_cmd.op_to_write =
1190                         cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1191                                     F_FW_CMD_REQUEST | F_FW_CMD_READ);
1192                 caps_cmd.cfvalid_to_len16 = cpu_to_be32(FW_LEN16(caps_cmd));
1193                 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
1194                                  sizeof(caps_cmd), &caps_cmd);
1195                 strcpy(config_name, "Firmware Default");
1196         }
1197
1198         config_issued = 1;
1199         if (ret < 0)
1200                 goto bye;
1201
1202         finiver = be32_to_cpu(caps_cmd.finiver);
1203         finicsum = be32_to_cpu(caps_cmd.finicsum);
1204         cfcsum = be32_to_cpu(caps_cmd.cfcsum);
1205         if (finicsum != cfcsum)
1206                 dev_warn(adapter, "Configuration File checksum mismatch: [fini] csum=%#x, computed csum=%#x\n",
1207                          finicsum, cfcsum);
1208
1209         /*
1210          * If we're a pure NIC driver then disable all offloading facilities.
1211          * This will allow the firmware to optimize aspects of the hardware
1212          * configuration which will result in improved performance.
1213          */
1214         caps_cmd.niccaps &= cpu_to_be16(~FW_CAPS_CONFIG_NIC_ETHOFLD);
1215         caps_cmd.toecaps = 0;
1216         caps_cmd.iscsicaps = 0;
1217         caps_cmd.rdmacaps = 0;
1218         caps_cmd.fcoecaps = 0;
1219         caps_cmd.cryptocaps = 0;
1220
1221         /*
1222          * And now tell the firmware to use the configuration we just loaded.
1223          */
1224         caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1225                                            F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
1226         caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
1227         ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
1228                          NULL);
1229         if (ret < 0) {
1230                 dev_warn(adapter, "Unable to finalize Firmware Capabilities %d\n",
1231                          -ret);
1232                 goto bye;
1233         }
1234
1235         /*
1236          * Tweak configuration based on system architecture, etc.
1237          */
1238         ret = adap_init0_tweaks(adapter);
1239         if (ret < 0) {
1240                 dev_warn(adapter, "Unable to do init0-tweaks %d\n", -ret);
1241                 goto bye;
1242         }
1243
1244         /*
1245          * And finally tell the firmware to initialize itself using the
1246          * parameters from the Configuration File.
1247          */
1248         ret = t4_fw_initialize(adapter, adapter->mbox);
1249         if (ret < 0) {
1250                 dev_warn(adapter, "Initializing Firmware failed, error %d\n",
1251                          -ret);
1252                 goto bye;
1253         }
1254
1255         /*
1256          * Return successfully and note that we're operating with parameters
1257          * not supplied by the driver, rather than from hard-wired
1258          * initialization constants buried in the driver.
1259          */
1260         dev_info(adapter,
1261                  "Successfully configured using Firmware Configuration File \"%s\", version %#x, computed checksum %#x\n",
1262                  config_name, finiver, cfcsum);
1263
1264         return 0;
1265
1266         /*
1267          * Something bad happened.  Return the error ...  (If the "error"
1268          * is that there's no Configuration File on the adapter we don't
1269          * want to issue a warning since this is fairly common.)
1270          */
1271 bye:
1272         if (config_issued && ret != -ENOENT)
1273                 dev_warn(adapter, "\"%s\" configuration file error %d\n",
1274                          config_name, -ret);
1275
1276         dev_debug(adapter, "%s: returning ret = %d ..\n", __func__, ret);
1277         return ret;
1278 }
1279
1280 static int adap_init0(struct adapter *adap)
1281 {
1282         struct fw_caps_config_cmd caps_cmd;
1283         int ret = 0;
1284         u32 v, port_vec;
1285         enum dev_state state;
1286         u32 params[7], val[7];
1287         int reset = 1;
1288         int mbox = adap->mbox;
1289
1290         /*
1291          * Contact FW, advertising Master capability.
1292          */
1293         ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
1294         if (ret < 0) {
1295                 dev_err(adap, "%s: could not connect to FW, error %d\n",
1296                         __func__, -ret);
1297                 goto bye;
1298         }
1299
1300         CXGBE_DEBUG_MBOX(adap, "%s: adap->mbox = %d; ret = %d\n", __func__,
1301                          adap->mbox, ret);
1302
1303         if (ret == mbox)
1304                 adap->flags |= MASTER_PF;
1305
1306         if (state == DEV_STATE_INIT) {
1307                 /*
1308                  * Force halt and reset FW because a previous instance may have
1309                  * exited abnormally without properly shutting down
1310                  */
1311                 ret = t4_fw_halt(adap, adap->mbox, reset);
1312                 if (ret < 0) {
1313                         dev_err(adap, "Failed to halt. Exit.\n");
1314                         goto bye;
1315                 }
1316
1317                 ret = t4_fw_restart(adap, adap->mbox, reset);
1318                 if (ret < 0) {
1319                         dev_err(adap, "Failed to restart. Exit.\n");
1320                         goto bye;
1321                 }
1322                 state = (enum dev_state)((unsigned)state & ~DEV_STATE_INIT);
1323         }
1324
1325         t4_get_version_info(adap);
1326
1327         ret = t4_get_core_clock(adap, &adap->params.vpd);
1328         if (ret < 0) {
1329                 dev_err(adap, "%s: could not get core clock, error %d\n",
1330                         __func__, -ret);
1331                 goto bye;
1332         }
1333
1334         /*
1335          * If the firmware is initialized already (and we're not forcing a
1336          * master initialization), note that we're living with existing
1337          * adapter parameters.  Otherwise, it's time to try initializing the
1338          * adapter ...
1339          */
1340         if (state == DEV_STATE_INIT) {
1341                 dev_info(adap, "Coming up as %s: Adapter already initialized\n",
1342                          adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
1343         } else {
1344                 dev_info(adap, "Coming up as MASTER: Initializing adapter\n");
1345
1346                 ret = adap_init0_config(adap, reset);
1347                 if (ret == -ENOENT) {
1348                         dev_err(adap,
1349                                 "No Configuration File present on adapter. Using hard-wired configuration parameters.\n");
1350                         goto bye;
1351                 }
1352         }
1353         if (ret < 0) {
1354                 dev_err(adap, "could not initialize adapter, error %d\n", -ret);
1355                 goto bye;
1356         }
1357
1358         /* Now that we've successfully configured and initialized the adapter
1359          * (or found it already initialized), we can ask the Firmware what
1360          * resources it has provisioned for us.
1361          */
1362         ret = t4_get_pfres(adap);
1363         if (ret) {
1364                 dev_err(adap->pdev_dev,
1365                         "Unable to retrieve resource provisioning info\n");
1366                 goto bye;
1367         }
1368
1369         /* Find out what ports are available to us. */
1370         v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
1371             V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
1372         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
1373         if (ret < 0) {
1374                 dev_err(adap, "%s: failure in t4_query_params; error = %d\n",
1375                         __func__, ret);
1376                 goto bye;
1377         }
1378
1379         adap->params.nports = hweight32(port_vec);
1380         adap->params.portvec = port_vec;
1381
1382         dev_debug(adap, "%s: adap->params.nports = %u\n", __func__,
1383                   adap->params.nports);
1384
1385         /*
1386          * Give the SGE code a chance to pull in anything that it needs ...
1387          * Note that this must be called after we retrieve our VPD parameters
1388          * in order to know how to convert core ticks to seconds, etc.
1389          */
1390         ret = t4_sge_init(adap);
1391         if (ret < 0) {
1392                 dev_err(adap, "t4_sge_init failed with error %d\n",
1393                         -ret);
1394                 goto bye;
1395         }
1396
1397         /*
1398          * Grab some of our basic fundamental operating parameters.
1399          */
1400         params[0] = CXGBE_FW_PARAM_PFVF(L2T_START);
1401         params[1] = CXGBE_FW_PARAM_PFVF(L2T_END);
1402         params[2] = CXGBE_FW_PARAM_PFVF(FILTER_START);
1403         params[3] = CXGBE_FW_PARAM_PFVF(FILTER_END);
1404         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 4, params, val);
1405         if (ret < 0)
1406                 goto bye;
1407         adap->l2t_start = val[0];
1408         adap->l2t_end = val[1];
1409         adap->tids.ftid_base = val[2];
1410         adap->tids.nftids = val[3] - val[2] + 1;
1411
1412         params[0] = CXGBE_FW_PARAM_PFVF(CLIP_START);
1413         params[1] = CXGBE_FW_PARAM_PFVF(CLIP_END);
1414         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
1415         if (ret < 0)
1416                 goto bye;
1417         adap->clipt_start = val[0];
1418         adap->clipt_end = val[1];
1419
1420         /*
1421          * Get device capabilities so we can determine what resources we need
1422          * to manage.
1423          */
1424         memset(&caps_cmd, 0, sizeof(caps_cmd));
1425         caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1426                                      F_FW_CMD_REQUEST | F_FW_CMD_READ);
1427         caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
1428         ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
1429                          &caps_cmd);
1430         if (ret < 0)
1431                 goto bye;
1432
1433         if ((caps_cmd.niccaps & cpu_to_be16(FW_CAPS_CONFIG_NIC_HASHFILTER)) &&
1434             is_t6(adap->params.chip)) {
1435                 if (cxgbe_init_hash_filter(adap) < 0)
1436                         goto bye;
1437         }
1438
1439         /* See if FW supports FW_FILTER2 work request */
1440         if (is_t4(adap->params.chip)) {
1441                 adap->params.filter2_wr_support = 0;
1442         } else {
1443                 params[0] = CXGBE_FW_PARAM_DEV(FILTER2_WR);
1444                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
1445                                       1, params, val);
1446                 adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
1447         }
1448
1449         /* Check if FW supports returning vin.
1450          * If this is not supported, the driver will derive
1451          * these values from the viid.
1452          */
1453         params[0] = CXGBE_FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
1454         ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
1455                               1, params, val);
1456         adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0);
1457
1458         /* query tid-related parameters */
1459         params[0] = CXGBE_FW_PARAM_DEV(NTID);
1460         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
1461                               params, val);
1462         if (ret < 0)
1463                 goto bye;
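        /* Reserve at most half of the TIDs for active-open connections,
         * capped at MAX_ATIDS.
         */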
1464         adap->tids.ntids = val[0];
1465         adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
1466
1467         /* If we're running on newer firmware, let it know that we're
1468          * prepared to deal with encapsulated CPL messages.  Older
1469          * firmware won't understand this and we'll just get
1470          * unencapsulated messages ...
1471          */
1472         params[0] = CXGBE_FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
1473         val[0] = 1;
1474         (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
1475
1476         /*
1477          * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
1478          * capability.  Earlier versions of the firmware didn't have the
1479          * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
1480          * permission to use ULPTX MEMWRITE DSGL.
1481          */
1482         if (is_t4(adap->params.chip)) {
1483                 adap->params.ulptx_memwrite_dsgl = false;
1484         } else {
1485                 params[0] = CXGBE_FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
1486                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
1487                                       1, params, val);
1488                 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
1489         }
1490
1491         /* Query for max number of packets that can be coalesced for Tx */
1492         params[0] = CXGBE_FW_PARAM_PFVF(MAX_PKTS_PER_ETH_TX_PKTS_WR);
1493         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
1494         if (!ret && val[0] > 0)
1495                 adap->params.max_tx_coalesce_num = val[0];
1496         else
1497                 adap->params.max_tx_coalesce_num = ETH_COALESCE_PKT_NUM;
1498
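        /* Check whether the firmware can keep a VI's Rx path disabled until
         * a link-up event is received on the firmware event queue.
         */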
1499         params[0] = CXGBE_FW_PARAM_DEV(VI_ENABLE_INGRESS_AFTER_LINKUP);
1500         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
1501         adap->params.vi_enable_rx = (ret == 0 && val[0] != 0);
1502
1503         /*
1504          * The MTU/MSS Table is initialized by now, so load their values.  If
1505          * we're initializing the adapter, then we'll make any modifications
1506          * we want to the MTU/MSS Table and also initialize the congestion
1507          * parameters.
1508          */
1509         t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
1510         if (state != DEV_STATE_INIT) {
1511                 int i;
1512
1513                 /*
1514                  * The default MTU Table contains values 1492 and 1500.
1515                  * However, for TCP, it's better to have two values which are
1516                  * a multiple of 8 +/- 4 bytes apart near this popular MTU.
1517                  * This allows us to have a TCP Data Payload which is a
1518                  * multiple of 8 regardless of what combination of TCP Options
1519                  * are in use (always a multiple of 4 bytes) which is
1520                  * important for performance reasons.  For instance, if no
1521                  * options are in use, then we have a 20-byte IP header and a
1522                  * 20-byte TCP header.  In this case, a 1500-byte MSS would
1523                  * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
1524                  * which is not a multiple of 8.  So using an MSS of 1488 in
1525                  * this case results in a TCP Data Payload of 1448 bytes which
1526                  * is a multiple of 8.  On the other hand, if 12-byte TCP Time
1527                  * Stamps have been negotiated, then an MTU of 1500 bytes
1528                  * results in a TCP Data Payload of 1448 bytes which, as
1529                  * above, is a multiple of 8 bytes ...
1530                  */
1531                 for (i = 0; i < NMTUS; i++)
1532                         if (adap->params.mtus[i] == 1492) {
1533                                 adap->params.mtus[i] = 1488;
1534                                 break;
1535                         }
1536
1537                 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
1538                              adap->params.b_wnd);
1539         }
1540         t4_init_sge_params(adap);
1541         ret = configure_filter_mode_mask(adap);
1542         if (ret < 0)
1543                 goto bye;
1544         t4_init_tp_params(adap);
1545         configure_pcie_ext_tag(adap);
1546         configure_vlan_types(adap);
1547         cxgbe_configure_max_ethqsets(adap);
1548
1549         adap->params.drv_memwin = MEMWIN_NIC;
1550         adap->flags |= FW_OK;
1551         dev_debug(adap, "%s: returning zero..\n", __func__);
1552         return 0;
1553
1554         /*
1555          * Something bad happened.  If a command timed out or failed with EIO,
1556          * the firmware is not operating within its spec or something
1557          * catastrophic happened to the HW/FW, so stop issuing commands.
1558          */
1559 bye:
1560         if (ret != -ETIMEDOUT && ret != -EIO)
1561                 t4_fw_bye(adap, adap->mbox);
1562         return ret;
1563 }
1564
1565 /**
1566  * t4_os_portmod_changed - handle port module changes
1567  * @adap: the adapter associated with the module change
1568  * @port_id: the port index whose module status has changed
1569  *
1570  * This is the OS-dependent handler for port module changes.  It is
1571  * invoked when a port module is removed or inserted for any OS-specific
1572  * processing.
1573  */
1574 void t4_os_portmod_changed(const struct adapter *adap, int port_id)
1575 {
1576         static const char * const mod_str[] = {
1577                 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
1578         };
1579
1580         const struct port_info *pi = adap2pinfo(adap, port_id);
1581
1582         if (pi->link_cfg.mod_type == FW_PORT_MOD_TYPE_NONE)
1583                 dev_info(adap, "Port%d: port module unplugged\n", pi->port_id);
1584         else if (pi->link_cfg.mod_type < ARRAY_SIZE(mod_str))
1585                 dev_info(adap, "Port%d: %s port module inserted\n", pi->port_id,
1586                          mod_str[pi->link_cfg.mod_type]);
1587         else if (pi->link_cfg.mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
1588                 dev_info(adap, "Port%d: unsupported port module inserted\n",
1589                          pi->port_id);
1590         else if (pi->link_cfg.mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
1591                 dev_info(adap, "Port%d: unknown port module inserted\n",
1592                          pi->port_id);
1593         else if (pi->link_cfg.mod_type == FW_PORT_MOD_TYPE_ERROR)
1594                 dev_info(adap, "Port%d: transceiver module error\n",
1595                          pi->port_id);
1596         else
1597                 dev_info(adap, "Port%d: unknown module type %d inserted\n",
1598                          pi->port_id, pi->link_cfg.mod_type);
1599 }
1600
1601 void t4_os_link_changed(struct adapter *adap, int port_id)
1602 {
1603         struct port_info *pi = adap2pinfo(adap, port_id);
1604
1605         /* If the link status has not changed, or if the firmware doesn't
1606          * support enabling/disabling the VI's Rx path at runtime,
1607          * then return.
1608          */
1609         if (adap->params.vi_enable_rx == 0 ||
1610             pi->vi_en_rx == pi->link_cfg.link_ok)
1611                 return;
1612
1613         /* Don't enable VI Rx path, if link has been administratively
1614          * turned off.
1615          */
1616         if (pi->vi_en_tx == 0 && pi->vi_en_rx == 0)
1617                 return;
1618
1619         /* When link goes down, disable the port's Rx path to drop
1620          * Rx traffic closer to the wire, instead of processing it
1621          * further in the Rx pipeline. The Rx path will be re-enabled
1622          * once the link-up message arrives on the firmware event queue.
1623          */
1624         pi->vi_en_rx = pi->link_cfg.link_ok;
1625         t4_enable_vi(adap, adap->mbox, pi->viid, pi->vi_en_rx, pi->vi_en_tx);
1626 }
1627
1628 bool cxgbe_force_linkup(struct adapter *adap)
1629 {
1630         if (is_pf4(adap))
1631                 return false;   /* force_linkup not required for pf driver */
1632
1633         return adap->devargs.force_link_up;
1634 }
1635
1636 /**
1637  * cxgbe_link_start - enable a port
1638  * @pi: the port to enable
1639  *
1640  * Performs the MAC and PHY actions needed to enable a port.
1641  */
1642 int cxgbe_link_start(struct port_info *pi)
1643 {
1644         struct adapter *adapter = pi->adapter;
1645         u64 conf_offloads;
1646         unsigned int mtu;
1647         int ret;
1648
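        /* Derive the MTU from the configured maximum Rx packet length,
         * excluding the Ethernet header and CRC.
         */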
1649         mtu = pi->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
1650               (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
1651
1652         conf_offloads = pi->eth_dev->data->dev_conf.rxmode.offloads;
1653
1654         /*
1655          * We do not set address filters and promiscuity here, the stack does
1656          * that step explicitly.
1657          */
1658         ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1, -1,
1659                             !!(conf_offloads & DEV_RX_OFFLOAD_VLAN_STRIP),
1660                             true);
1661         if (ret == 0) {
1662                 ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt,
1663                                 (u8 *)&pi->eth_dev->data->mac_addrs[0]);
1664                 if (ret >= 0) {
1665                         pi->xact_addr_filt = ret;
1666                         ret = 0;
1667                 }
1668         }
1669         if (ret == 0 && is_pf4(adapter))
1670                 ret = t4_link_l1cfg(pi, pi->link_cfg.admin_caps);
1671         if (ret == 0) {
1672                 /* Disable VI Rx until link up message is received in
1673                  * firmware event queue, if firmware supports enabling/
1674                  * disabling VI Rx at runtime.
1675                  */
1676                 pi->vi_en_rx = adapter->params.vi_enable_rx ? 0 : 1;
1677                 pi->vi_en_tx = 1;
1678                 ret = t4_enable_vi_params(adapter, adapter->mbox, pi->viid,
1679                                           pi->vi_en_rx, pi->vi_en_tx, false);
1680         }
1681
1682         if (ret == 0 && cxgbe_force_linkup(adapter))
1683                 pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
1684         return ret;
1685 }
1686
1687 /**
1688  * cxgbe_write_rss_conf - write the RSS hash configuration for a given port
1689  * @pi: the port
1690  * @rss_hf: Hash configuration to apply
1691  */
1692 int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
1693 {
1694         struct adapter *adapter = pi->adapter;
1695         const struct sge_eth_rxq *rxq;
1696         u64 flags = 0;
1697         u16 rss;
1698         int err;
1699
1700         /*  Should never be called before setting up sge eth rx queues */
1701         if (!(adapter->flags & FULL_INIT_DONE)) {
1702                 dev_err(adapter, "%s: No RXQs available on port %d\n",
1703                         __func__, pi->port_id);
1704                 return -EINVAL;
1705         }
1706
1707         /* Don't allow unsupported hash functions */
1708         if (rss_hf & ~CXGBE_RSS_HF_ALL)
1709                 return -EINVAL;
1710
1711         if (rss_hf & CXGBE_RSS_HF_IPV4_MASK)
1712                 flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
1713
1714         if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1715                 flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
1716
1717         if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1718                 flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
1719                          F_FW_RSS_VI_CONFIG_CMD_UDPEN;
1720
1721         if (rss_hf & CXGBE_RSS_HF_IPV6_MASK)
1722                 flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
1723
1724         if (rss_hf & CXGBE_RSS_HF_TCP_IPV6_MASK)
1725                 flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
1726                          F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
1727
1728         if (rss_hf & CXGBE_RSS_HF_UDP_IPV6_MASK)
1729                 flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
1730                          F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
1731                          F_FW_RSS_VI_CONFIG_CMD_UDPEN;
1732
1733         rxq = &adapter->sge.ethrxq[pi->first_rxqset];
1734         rss = rxq[0].rspq.abs_id;
1735
1736         /* If Tunnel All Lookup isn't specified in the global RSS
1737          * Configuration, then we need to specify a default Ingress
1738          * Queue for any ingress packets which aren't hashed.  We'll
1739          * use our first ingress queue ...
1740          */
1741         err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
1742                                flags, rss);
1743         return err;
1744 }
1745
1746 /**
1747  * cxgbe_write_rss - write the RSS table for a given port
1748  * @pi: the port
1749  * @queues: array of queue indices for RSS
1750  *
1751  * Sets up the portion of the HW RSS table for the port's VI to distribute
1752  * packets to the Rx queues in @queues.
1753  */
1754 int cxgbe_write_rss(const struct port_info *pi, const u16 *queues)
1755 {
1756         u16 *rss;
1757         int i, err;
1758         struct adapter *adapter = pi->adapter;
1759         const struct sge_eth_rxq *rxq;
1760
1761         /*  Should never be called before setting up sge eth rx queues */
1762         BUG_ON(!(adapter->flags & FULL_INIT_DONE));
1763
1764         rxq = &adapter->sge.ethrxq[pi->first_rxqset];
1765         rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
1766         if (!rss)
1767                 return -ENOMEM;
1768
1769         /* map the queue indices to queue ids */
1770         for (i = 0; i < pi->rss_size; i++, queues++)
1771                 rss[i] = rxq[*queues].rspq.abs_id;
1772
1773         err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
1774                                   pi->rss_size, rss, pi->rss_size);
1775         rte_free(rss);
1776         return err;
1777 }
1778
1779 /**
1780  * cxgbe_setup_rss - configure RSS for a port
1781  * @pi: the port
1782  *
1783  * Sets up RSS to distribute packets to multiple receive queues.  We
1784  * configure the RSS CPU lookup table to distribute to the number of HW
1785  * receive queues, and the response queue lookup table to narrow that
1786  * down to the response queues actually configured for each port.
1787  * We always configure the RSS mapping for all ports since the mapping
1788  * table has plenty of entries.
1789  */
1790 int cxgbe_setup_rss(struct port_info *pi)
1791 {
1792         int j, err;
1793         struct adapter *adapter = pi->adapter;
1794
1795         dev_debug(adapter, "%s:  pi->rss_size = %u; pi->n_rx_qsets = %u\n",
1796                   __func__, pi->rss_size, pi->n_rx_qsets);
1797
1798         if (!(pi->flags & PORT_RSS_DONE)) {
1799                 if (adapter->flags & FULL_INIT_DONE) {
1800                         /* Fill default values with equal distribution */
1801                         for (j = 0; j < pi->rss_size; j++)
1802                                 pi->rss[j] = j % pi->n_rx_qsets;
1803
1804                         err = cxgbe_write_rss(pi, pi->rss);
1805                         if (err)
1806                                 return err;
1807
1808                         err = cxgbe_write_rss_conf(pi, pi->rss_hf);
1809                         if (err)
1810                                 return err;
1811                         pi->flags |= PORT_RSS_DONE;
1812                 }
1813         }
1814         return 0;
1815 }
1816
1817 /*
1818  * Enable interrupt generation for an Rx response queue.
1819  */
1820 static void enable_rx(struct adapter *adap, struct sge_rspq *q)
1821 {
1822         /* 0-increment GTS to start the timer and enable interrupts */
1823         t4_write_reg(adap, is_pf4(adap) ? MYPF_REG(A_SGE_PF_GTS) :
1824                                           T4VF_SGE_BASE_ADDR + A_SGE_VF_GTS,
1825                      V_SEINTARM(q->intr_params) |
1826                      V_INGRESSQID(q->cntxt_id));
1827 }
1828
1829 void cxgbe_enable_rx_queues(struct port_info *pi)
1830 {
1831         struct adapter *adap = pi->adapter;
1832         struct sge *s = &adap->sge;
1833         unsigned int i;
1834
1835         for (i = 0; i < pi->n_rx_qsets; i++)
1836                 enable_rx(adap, &s->ethrxq[pi->first_rxqset + i].rspq);
1837 }
1838
1839 /**
1840  * fw_caps_to_speed_caps - translate Firmware Port Caps to Speed Caps.
1841  * @port_type: Firmware Port Type
1842  * @fw_caps: Firmware Port Capabilities
1843  * @speed_caps: Device Info Speed Capabilities
1844  *
1845  * Translate a Firmware Port Capabilities specification to Device Info
1846  * Speed Capabilities.
1847  */
1848 static void fw_caps_to_speed_caps(enum fw_port_type port_type,
1849                                   unsigned int fw_caps,
1850                                   u32 *speed_caps)
1851 {
1852 #define SET_SPEED(__speed_name) \
1853         do { \
1854                 *speed_caps |= ETH_LINK_ ## __speed_name; \
1855         } while (0)
1856
1857 #define FW_CAPS_TO_SPEED(__fw_name) \
1858         do { \
1859                 if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
1860                         SET_SPEED(__fw_name); \
1861         } while (0)
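        /* For example, FW_CAPS_TO_SPEED(SPEED_25G) sets ETH_LINK_SPEED_25G in
         * *speed_caps only if FW_PORT_CAP32_SPEED_25G is present in fw_caps.
         */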
1862
1863         switch (port_type) {
1864         case FW_PORT_TYPE_BT_SGMII:
1865         case FW_PORT_TYPE_BT_XFI:
1866         case FW_PORT_TYPE_BT_XAUI:
1867                 FW_CAPS_TO_SPEED(SPEED_100M);
1868                 FW_CAPS_TO_SPEED(SPEED_1G);
1869                 FW_CAPS_TO_SPEED(SPEED_10G);
1870                 break;
1871
1872         case FW_PORT_TYPE_KX4:
1873         case FW_PORT_TYPE_KX:
1874         case FW_PORT_TYPE_FIBER_XFI:
1875         case FW_PORT_TYPE_FIBER_XAUI:
1876         case FW_PORT_TYPE_SFP:
1877         case FW_PORT_TYPE_QSFP_10G:
1878         case FW_PORT_TYPE_QSA:
1879                 FW_CAPS_TO_SPEED(SPEED_1G);
1880                 FW_CAPS_TO_SPEED(SPEED_10G);
1881                 break;
1882
1883         case FW_PORT_TYPE_KR:
1884                 SET_SPEED(SPEED_10G);
1885                 break;
1886
1887         case FW_PORT_TYPE_BP_AP:
1888         case FW_PORT_TYPE_BP4_AP:
1889                 SET_SPEED(SPEED_1G);
1890                 SET_SPEED(SPEED_10G);
1891                 break;
1892
1893         case FW_PORT_TYPE_BP40_BA:
1894         case FW_PORT_TYPE_QSFP:
1895                 SET_SPEED(SPEED_40G);
1896                 break;
1897
1898         case FW_PORT_TYPE_CR_QSFP:
1899         case FW_PORT_TYPE_SFP28:
1900         case FW_PORT_TYPE_KR_SFP28:
1901                 FW_CAPS_TO_SPEED(SPEED_1G);
1902                 FW_CAPS_TO_SPEED(SPEED_10G);
1903                 FW_CAPS_TO_SPEED(SPEED_25G);
1904                 break;
1905
1906         case FW_PORT_TYPE_CR2_QSFP:
1907                 SET_SPEED(SPEED_50G);
1908                 break;
1909
1910         case FW_PORT_TYPE_KR4_100G:
1911         case FW_PORT_TYPE_CR4_QSFP:
1912                 FW_CAPS_TO_SPEED(SPEED_25G);
1913                 FW_CAPS_TO_SPEED(SPEED_40G);
1914                 FW_CAPS_TO_SPEED(SPEED_50G);
1915                 FW_CAPS_TO_SPEED(SPEED_100G);
1916                 break;
1917
1918         default:
1919                 break;
1920         }
1921
1922 #undef FW_CAPS_TO_SPEED
1923 #undef SET_SPEED
1924 }
1925
1926 /**
1927  * cxgbe_get_speed_caps - Fetch supported speed capabilities
1928  * @pi: Underlying port's info
1929  * @speed_caps: Device Info speed capabilities
1930  *
1931  * Fetch supported speed capabilities of the underlying port.
1932  */
1933 void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
1934 {
1935         *speed_caps = 0;
1936
1937         fw_caps_to_speed_caps(pi->link_cfg.port_type, pi->link_cfg.pcaps,
1938                               speed_caps);
1939
1940         if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG))
1941                 *speed_caps |= ETH_LINK_SPEED_FIXED;
1942 }
1943
1944 /**
1945  * cxgbe_set_link_status - Set device link up or down.
1946  * @pi: Underlying port's info
1947  * @status: 0 - down, 1 - up
1948  *
1949  * Set the device link up or down.
1950  */
1951 int cxgbe_set_link_status(struct port_info *pi, bool status)
1952 {
1953         struct adapter *adapter = pi->adapter;
1954         int err = 0;
1955
1956         /* Wait for link up message from firmware to enable Rx path,
1957          * if firmware supports enabling/disabling VI Rx at runtime.
1958          */
1959         pi->vi_en_rx = adapter->params.vi_enable_rx ? 0 : status;
1960         pi->vi_en_tx = status;
1961         err = t4_enable_vi(adapter, adapter->mbox, pi->viid, pi->vi_en_rx,
1962                            pi->vi_en_tx);
1963         if (err) {
1964                 dev_err(adapter, "%s: t4_enable_vi failed: %d\n", __func__, err);
1965                 return err;
1966         }
1967
1968         if (!status)
1969                 t4_reset_link_config(adapter, pi->pidx);
1970
1971         return 0;
1972 }
1973
1974 /**
1975  * cxgbe_up - enable the adapter
1976  * @adap: adapter being enabled
1977  *
1978  * Called when the first port is enabled, this function performs the
1979  * actions necessary to make an adapter operational, such as completing
1980  * the initialization of HW modules, and enabling interrupts.
1981  */
1982 int cxgbe_up(struct adapter *adap)
1983 {
1984         enable_rx(adap, &adap->sge.fw_evtq);
1985         t4_sge_tx_monitor_start(adap);
1986         if (is_pf4(adap))
1987                 t4_intr_enable(adap);
1988         adap->flags |= FULL_INIT_DONE;
1989
1990         /* TODO: deadman watchdog ?? */
1991         return 0;
1992 }
1993
1994 /*
1995  * Close the port
1996  */
1997 int cxgbe_down(struct port_info *pi)
1998 {
1999         return cxgbe_set_link_status(pi, false);
2000 }
2001
2002 /*
2003  * Release resources when all the ports have been stopped.
2004  */
2005 void cxgbe_close(struct adapter *adapter)
2006 {
2007         if (adapter->flags & FULL_INIT_DONE) {
2008                 tid_free(&adapter->tids);
2009                 t4_cleanup_mpstcam(adapter);
2010                 t4_cleanup_clip_tbl(adapter);
2011                 t4_cleanup_l2t(adapter);
2012                 t4_cleanup_smt(adapter);
2013                 if (is_pf4(adapter))
2014                         t4_intr_disable(adapter);
2015                 t4_sge_tx_monitor_stop(adapter);
2016                 t4_free_sge_resources(adapter);
2017                 adapter->flags &= ~FULL_INIT_DONE;
2018         }
2019
2020         cxgbe_cfg_queues_free(adapter);
2021
2022         if (is_pf4(adapter) && (adapter->flags & FW_OK))
2023                 t4_fw_bye(adapter, adapter->mbox);
2024 }
2025
2026 static void adap_smt_index(struct adapter *adapter, u32 *smt_start_idx,
2027                            u32 *smt_size)
2028 {
2029         u32 params[2], smt_val[2];
2030         int ret;
2031
2032         params[0] = CXGBE_FW_PARAM_PFVF(GET_SMT_START);
2033         params[1] = CXGBE_FW_PARAM_PFVF(GET_SMT_SIZE);
2034
2035         ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
2036                               2, params, smt_val);
2037
2038         /* If the FW doesn't recognize this command, fall back to the default
2039          * setting: start index 0 and size SMT_SIZE (256 entries).
2040          */
2041         if (ret < 0) {
2042                 *smt_start_idx = 0;
2043                 *smt_size = SMT_SIZE;
2044         } else {
2045                 *smt_start_idx = smt_val[0];
2046                 /* The SMT size can be zero if nsmt is not yet configured in the
2047                  * config file or is set to zero; in that case, assign all the
2048                  * remaining entries to this PF.
2049                  */
2050                 if (!smt_val[1])
2051                         *smt_size = SMT_SIZE - *smt_start_idx;
2052                 else
2053                         *smt_size = smt_val[1];
2054         }
2055 }
2056
2057 int cxgbe_probe(struct adapter *adapter)
2058 {
2059         u32 smt_start_idx, smt_size;
2060         struct port_info *pi;
2061         int func, i;
2062         int err = 0;
2063         u32 whoami;
2064         int chip;
2065
2066         whoami = t4_read_reg(adapter, A_PL_WHOAMI);
2067         chip = t4_get_chip_type(adapter,
2068                         CHELSIO_PCI_ID_VER(adapter->pdev->id.device_id));
2069         if (chip < 0)
2070                 return chip;
2071
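        /* Determine which PF is driving this device from the PL_WHOAMI
         * register; the SOURCEPF field layout differs on T6 chips.
         */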
2072         func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
2073                G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
2074
2075         adapter->mbox = func;
2076         adapter->pf = func;
2077
2078         t4_os_lock_init(&adapter->mbox_lock);
2079         TAILQ_INIT(&adapter->mbox_list);
2080         t4_os_lock_init(&adapter->win0_lock);
2081
2082         err = t4_prep_adapter(adapter);
2083         if (err)
2084                 return err;
2085
2086         setup_memwin(adapter);
2087         err = adap_init0(adapter);
2088         if (err) {
2089                 dev_err(adapter, "%s: Adapter initialization failed, error %d\n",
2090                         __func__, err);
2091                 goto out_free;
2092         }
2093
2094         if (!is_t4(adapter->params.chip)) {
2095                 /*
2096                  * The userspace doorbell BAR is split evenly into doorbell
2097                  * regions, each associated with an egress queue.  If this
2098                  * per-queue region is large enough (at least UDBS_SEG_SIZE)
2099                  * then it can be used to submit a tx work request with an
2100                  * implied doorbell.  Enable write combining on the BAR if
2101                  * there is room for such work requests.
2102                  */
2103                 int s_qpp, qpp, num_seg;
2104
2105                 s_qpp = (S_QUEUESPERPAGEPF0 +
2106                         (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) *
2107                         adapter->pf);
2108                 qpp = 1 << ((t4_read_reg(adapter,
2109                                 A_SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp)
2110                                 & M_QUEUESPERPAGEPF0);
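                /* qpp is the number of egress queues sharing each doorbell
                 * page for this PF; num_seg is how many UDBS_SEG_SIZE regions
                 * fit in one page. If qpp exceeds num_seg, the per-queue
                 * doorbell region is too small for inline work requests.
                 */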
2111                 num_seg = CXGBE_PAGE_SIZE / UDBS_SEG_SIZE;
2112                 if (qpp > num_seg)
2113                         dev_warn(adapter, "Incorrect SGE EGRESS QUEUES_PER_PAGE configuration, continuing in debug mode\n");
2114
2115                 adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr;
2116                 if (!adapter->bar2) {
2117                         dev_err(adapter, "cannot map device bar2 region\n");
2118                         err = -ENOMEM;
2119                         goto out_free;
2120                 }
2121                 t4_write_reg(adapter, A_SGE_STAT_CFG, V_STATSOURCE_T5(7) |
2122                              V_STATMODE(0));
2123         }
2124
2125         for_each_port(adapter, i) {
2126                 const unsigned int numa_node = rte_socket_id();
2127                 char name[RTE_ETH_NAME_MAX_LEN];
2128                 struct rte_eth_dev *eth_dev;
2129
2130                 snprintf(name, sizeof(name), "%s_%d",
2131                          adapter->pdev->device.name, i);
2132
2133                 if (i == 0) {
2134                         /* First port is already allocated by DPDK */
2135                         eth_dev = adapter->eth_dev;
2136                         goto allocate_mac;
2137                 }
2138
2139                 /*
2140                  * now do all data allocation - for eth_dev structure,
2141                  * and internal (private) data for the remaining ports
2142                  */
2143
2144                 /* reserve an ethdev entry */
2145                 eth_dev = rte_eth_dev_allocate(name);
2146                 if (!eth_dev)
2147                         goto out_free;
2148
2149                 eth_dev->data->dev_private =
2150                         rte_zmalloc_socket(name, sizeof(struct port_info),
2151                                            RTE_CACHE_LINE_SIZE, numa_node);
2152                 if (!eth_dev->data->dev_private)
2153                         goto out_free;
2154
2155 allocate_mac:
2156                 pi = eth_dev->data->dev_private;
2157                 adapter->port[i] = pi;
2158                 pi->eth_dev = eth_dev;
2159                 pi->adapter = adapter;
2160                 pi->xact_addr_filt = -1;
2161                 pi->port_id = i;
2162                 pi->pidx = i;
2163
2164                 pi->eth_dev->device = &adapter->pdev->device;
2165                 pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
2166                 pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
2167                 pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;
2168
2169                 rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);
2170
2171                 pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
2172                                                         RTE_ETHER_ADDR_LEN, 0);
2173                 if (!pi->eth_dev->data->mac_addrs) {
2174                         dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n",
2175                                 __func__);
2176                         err = -1;
2177                         goto out_free;
2178                 }
2179
2180                 if (i > 0) {
2181                         /* First port will be notified by upper layer */
2182                         rte_eth_dev_probing_finish(eth_dev);
2183                 }
2184         }
2185
2186         if (adapter->flags & FW_OK) {
2187                 err = t4_port_init(adapter, adapter->mbox, adapter->pf, 0);
2188                 if (err) {
2189                         dev_err(adapter, "%s: t4_port_init failed with err %d\n",
2190                                 __func__, err);
2191                         goto out_free;
2192                 }
2193         }
2194
2195         err = cxgbe_cfg_queues(adapter->eth_dev);
2196         if (err)
2197                 goto out_free;
2198
2199         cxgbe_print_adapter_info(adapter);
2200         cxgbe_print_port_info(adapter);
2201
2202         adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
2203                                           adapter->clipt_end);
2204         if (!adapter->clipt) {
2205                 /* We tolerate a lack of clip_table, giving up some
2206                  * functionality
2207                  */
2208                 dev_warn(adapter, "could not allocate CLIP. Continuing\n");
2209         }
2210
2211         adap_smt_index(adapter, &smt_start_idx, &smt_size);
2212         adapter->smt = t4_init_smt(smt_start_idx, smt_size);
2213         if (!adapter->smt)
2214                 dev_warn(adapter, "could not allocate SMT, continuing\n");
2215
2216         adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
2217         if (!adapter->l2t) {
2218                 /* We tolerate a lack of L2T, giving up some functionality */
2219                 dev_warn(adapter, "could not allocate L2T. Continuing\n");
2220         }
2221
2222         if (tid_init(&adapter->tids) < 0) {
2223                 /* Disable filtering support */
2224                 dev_warn(adapter, "could not allocate TID table, "
2225                          "filter support disabled. Continuing\n");
2226         }
2227
2228         t4_os_lock_init(&adapter->flow_lock);
2229
2230         adapter->mpstcam = t4_init_mpstcam(adapter);
2231         if (!adapter->mpstcam)
2232                 dev_warn(adapter, "could not allocate mps tcam table."
2233                          " Continuing\n");
2234
2235         if (is_hashfilter(adapter)) {
2236                 if (t4_read_reg(adapter, A_LE_DB_CONFIG) & F_HASHEN) {
2237                         u32 hash_base, hash_reg;
2238
2239                         hash_reg = A_LE_DB_TID_HASHBASE;
2240                         hash_base = t4_read_reg(adapter, hash_reg);
2241                         adapter->tids.hash_base = hash_base / 4;
2242                 }
2243         } else {
2244                 /* Disable hash filtering support */
2245                 dev_warn(adapter,
2246                          "Maskless filter support disabled. Continuing\n");
2247         }
2248
2249         err = cxgbe_init_rss(adapter);
2250         if (err)
2251                 goto out_free;
2252
2253         return 0;
2254
2255 out_free:
2256         cxgbe_cfg_queues_free(adapter);
2257
2258         for_each_port(adapter, i) {
2259                 pi = adap2pinfo(adapter, i);
2260                 if (pi->viid != 0)
2261                         t4_free_vi(adapter, adapter->mbox, adapter->pf,
2262                                    0, pi->viid);
2263                 rte_eth_dev_release_port(pi->eth_dev);
2264         }
2265
2266         if (adapter->flags & FW_OK)
2267                 t4_fw_bye(adapter, adapter->mbox);
2268         return -err;
2269 }