/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Chelsio Communications.
 * All rights reserved.
 */
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_random.h>
#include <rte_kvargs.h>
#include "base/common.h"
#include "base/t4_regs.h"
#include "base/t4_msg.h"
#include "cxgbe.h"
#include "cxgbe_pfvf.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "smt.h"
#include "mps_tcam.h"
/* Supported filter-mode feature combinations, used to validate the
 * filtermode/filtermask devargs below.
 */
static const u16 cxgbe_filter_mode_features[] = {
	(F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE |
	 F_PROTOCOL | F_PORT),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE |
	 F_PROTOCOL | F_FCOE),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_TOS |
	 F_PORT),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_TOS |
	 F_FCOE),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_PORT |
	 F_FCOE),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_TOS |
	 F_PORT | F_FCOE),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_VLAN |
	 F_FCOE),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_VNIC_ID |
	 F_FCOE),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_TOS | F_VLAN |
	 F_FCOE),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_TOS | F_VNIC_ID |
	 F_FCOE),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_VLAN | F_PORT |
	 F_FCOE),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_VNIC_ID | F_PORT |
	 F_FCOE),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_PROTOCOL | F_TOS |
	 F_PORT | F_FCOE),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_VLAN | F_PORT),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_VLAN | F_FCOE),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_VNIC_ID | F_PORT),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_VNIC_ID | F_FCOE),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VLAN | F_PORT),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VLAN | F_FCOE),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VNIC_ID |
	 F_PORT),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VNIC_ID |
	 F_FCOE),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_VLAN | F_PORT |
	 F_FCOE),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_VNIC_ID | F_PORT |
	 F_FCOE),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_TOS | F_VLAN | F_PORT | F_FCOE),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_TOS | F_VNIC_ID | F_PORT | F_FCOE),
	(F_FRAGMENTATION | F_MPSHITTYPE | F_VLAN | F_VNIC_ID | F_FCOE),
	(F_FRAGMENTATION | F_MACMATCH | F_ETHERTYPE | F_PROTOCOL | F_PORT |
	 F_FCOE),
	(F_FRAGMENTATION | F_MACMATCH | F_ETHERTYPE | F_TOS | F_PORT | F_FCOE),
	(F_FRAGMENTATION | F_MACMATCH | F_PROTOCOL | F_VLAN | F_PORT | F_FCOE),
	(F_FRAGMENTATION | F_MACMATCH | F_PROTOCOL | F_VNIC_ID | F_PORT |
	 F_FCOE),
	(F_FRAGMENTATION | F_MACMATCH | F_TOS | F_VLAN | F_PORT | F_FCOE),
	(F_FRAGMENTATION | F_MACMATCH | F_TOS | F_VNIC_ID | F_PORT | F_FCOE),
	(F_FRAGMENTATION | F_ETHERTYPE | F_VLAN | F_PORT | F_FCOE),
	(F_FRAGMENTATION | F_ETHERTYPE | F_VNIC_ID | F_PORT | F_FCOE),
	(F_FRAGMENTATION | F_PROTOCOL | F_TOS | F_VLAN | F_FCOE),
	(F_FRAGMENTATION | F_PROTOCOL | F_TOS | F_VNIC_ID | F_FCOE),
	(F_FRAGMENTATION | F_VLAN | F_VNIC_ID | F_PORT | F_FCOE),
	(F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_PROTOCOL | F_PORT |
	 F_FCOE),
	(F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_TOS | F_PORT | F_FCOE),
	(F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_VLAN | F_PORT),
	(F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_VNIC_ID | F_PORT),
	(F_MPSHITTYPE | F_MACMATCH | F_TOS | F_VLAN | F_PORT),
	(F_MPSHITTYPE | F_MACMATCH | F_TOS | F_VNIC_ID | F_PORT),
	(F_MPSHITTYPE | F_ETHERTYPE | F_VLAN | F_PORT | F_FCOE),
	(F_MPSHITTYPE | F_ETHERTYPE | F_VNIC_ID | F_PORT | F_FCOE),
	(F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VLAN | F_PORT | F_FCOE),
	(F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VNIC_ID | F_PORT | F_FCOE),
	(F_MPSHITTYPE | F_VLAN | F_VNIC_ID | F_PORT),
};
/*
 * Allocate a chunk of memory. The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	return rte_zmalloc(NULL, size, 0);
}
/*
 * Free memory allocated through t4_alloc_mem().
 */
void t4_free_mem(void *addr)
{
	rte_free(addr);
}
/*
 * Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  __rte_unused const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/*
	 * FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
		     ((const struct cpl_fw4_msg *)rsp)->type ==
		     FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adapter, "unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		/* do nothing */
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *msg = (const void *)rsp;

		t4_handle_fw_rpl(q->adapter, msg->data);
	} else if (opcode == CPL_ABORT_RPL_RSS) {
		const struct cpl_abort_rpl_rss *p = (const void *)rsp;

		cxgbe_hash_del_filter_rpl(q->adapter, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (const void *)rsp;

		cxgbe_filter_rpl(q->adapter, p);
	} else if (opcode == CPL_ACT_OPEN_RPL) {
		const struct cpl_act_open_rpl *p = (const void *)rsp;

		cxgbe_hash_filter_rpl(q->adapter, p);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (const void *)rsp;

		cxgbe_do_l2t_write_rpl(q->adapter, p);
	} else if (opcode == CPL_SMT_WRITE_RPL) {
		const struct cpl_smt_write_rpl *p = (const void *)rsp;

		cxgbe_do_smt_write_rpl(q->adapter, p);
	} else {
		dev_err(q->adapter, "unexpected CPL %#x on FW event queue\n",
			opcode);
	}
out:
	return 0;
}
/*
 * Setup sge control queues to pass control information.
 */
int cxgbe_setup_sge_ctrl_txq(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int err = 0, i = 0;

	for_each_port(adapter, i) {
		struct port_info *pi = adap2pinfo(adapter, i);
		char name[RTE_ETH_NAME_MAX_LEN];
		struct sge_ctrl_txq *q = &s->ctrlq[i];

		q->q.size = 1024;
		err = t4_sge_alloc_ctrl_txq(adapter, q,
					    pi->eth_dev, i,
					    s->fw_evtq.cntxt_id,
					    rte_eth_dev_socket_id(
						pi->eth_dev->data->port_id));
		if (err < 0) {
			dev_err(adapter, "Failed to alloc ctrl txq. Err: %d",
				err);
			goto out;
		}
		snprintf(name, sizeof(name), "%s_ctrl_pool_%d",
			 pi->eth_dev->device->driver->name,
			 pi->eth_dev->data->port_id);
		q->mb_pool = rte_pktmbuf_pool_create(name, s->ctrlq[i].q.size,
						     RTE_CACHE_LINE_SIZE,
						     RTE_MBUF_PRIV_ALIGN,
						     RTE_MBUF_DEFAULT_BUF_SIZE,
						     SOCKET_ID_ANY);
		if (!q->mb_pool) {
			err = -rte_errno;
			dev_err(adapter,
				"Can't create ctrl pool for port %d. Err: %d\n",
				pi->eth_dev->data->port_id, err);
			goto out;
		}
	}
	return 0;
out:
	t4_free_sge_resources(adapter);
	return err;
}
/**
 * cxgbe_poll_for_completion: Poll rxq for completion
 * @q: rxq to poll
 * @ms: milliseconds to delay
 * @cnt: number of times to poll
 * @c: completion to check for 'done' status
 *
 * Polls the rxq for replies until the completion is done or the count
 * expires.
 */
int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int ms,
			      unsigned int cnt, struct t4_completion *c)
{
	unsigned int i;
	unsigned int work_done, budget = 32;

	if (!c)
		return -EINVAL;

	for (i = 0; i < cnt; i++) {
		cxgbe_poll(q, NULL, budget, &work_done);
		t4_os_lock(&c->lock);
		if (c->done) {
			t4_os_unlock(&c->lock);
			return 0;
		}
		t4_os_unlock(&c->lock);
		rte_delay_ms(ms);
	}
	return -ETIMEDOUT;
}
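/*
 * Usage sketch (illustrative, not part of the driver): wait up to ten
 * polls, 100ms apart, for a reply that a handler running off the FW
 * event queue posts by setting 'compl.done' under 'compl.lock'. The
 * 'compl' completion here is a hypothetical object owned by the caller.
 *
 *	struct t4_completion compl;
 *	...
 *	if (cxgbe_poll_for_completion(&adap->sge.fw_evtq, 100, 10, &compl))
 *		... handle timeout ...
 */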
int cxgbe_setup_sge_fwevtq(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int err = 0;
	int msi_idx = 0;

	err = t4_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->eth_dev,
			       msi_idx, NULL, fwevtq_handler, -1, NULL, 0,
			       rte_socket_id());
	return err;
}
static int closest_timer(const struct sge *s, int time)
{
	unsigned int i, match = 0;
	int delta, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
static int closest_thres(const struct sge *s, int thres)
{
	unsigned int i, match = 0;
	int delta, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
/**
 * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 * @q: the Rx queue
 * @us: the hold-off time in us, or 0 to disable timer
 * @cnt: the hold-off packet count, or 0 to disable counter
 *
 * Sets an Rx queue's interrupt hold-off time and packet count. At least
 * one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
			       unsigned int cnt)
{
	struct adapter *adap = q->adapter;
	unsigned int timer_val;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
			    V_FW_PARAMS_PARAM_X(
			    FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    V_FW_PARAMS_PARAM_YZ(q->cntxt_id);
			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
					    &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	timer_val = (us == 0) ? X_TIMERREG_RESTART_COUNTER :
				closest_timer(&adap->sge, us);

	if ((us | cnt) == 0)
		q->intr_params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX);
	else
		q->intr_params = V_QINTR_TIMER_IDX(timer_val) |
				 V_QINTR_CNT_EN(cnt > 0);
	return 0;
}
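/*
 * Example (illustrative): moderate a queue to at most one interrupt per
 * 5us hold-off timer or 32 coalesced packets, whichever fires first;
 * these are the same defaults that init_rspq() applies below.
 *
 *	cxgb4_set_rspq_intr_params(&rxq->rspq, 5, 32);
 */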
/**
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgbe_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	t4_os_lock(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = p - t->atid_tab;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	t4_os_unlock(&t->atid_lock);
	return atid;
}
/**
 * Release an active-open TID.
 */
void cxgbe_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid];

	t4_os_lock(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	t4_os_unlock(&t->atid_lock);
}
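/*
 * Sketch of the atid life cycle (illustrative): an active-open TID is
 * borrowed for the duration of a hardware request and returned once the
 * reply arrives; 'my_ctx' is a hypothetical per-request context.
 *
 *	int atid = cxgbe_alloc_atid(&adap->tids, my_ctx);
 *	if (atid >= 0) {
 *		... issue the work request carrying 'atid' ...
 *		cxgbe_free_atid(&adap->tids, atid);
 *	}
 */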
/**
 * Populate a TID_RELEASE WR. Caller must properly size the mbuf.
 */
static void mk_tid_release(struct rte_mbuf *mbuf, unsigned int tid)
{
	struct cpl_tid_release *req;

	req = rte_pktmbuf_mtod(mbuf, struct cpl_tid_release *);
	INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
}
/**
 * Release a TID and inform HW. If the release message cannot be
 * allocated, the request is dropped.
 */
void cxgbe_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
		      unsigned short family)
{
	struct rte_mbuf *mbuf;
	struct adapter *adap = container_of(t, struct adapter, tids);

	WARN_ON(tid >= t->ntids);

	if (t->tid_tab[tid]) {
		t->tid_tab[tid] = NULL;
		rte_atomic32_dec(&t->conns_in_use);
		if (t->hash_base && tid >= t->hash_base) {
			if (family == FILTER_TYPE_IPV4)
				rte_atomic32_dec(&t->hash_tids_in_use);
		} else {
			if (family == FILTER_TYPE_IPV4)
				rte_atomic32_dec(&t->tids_in_use);
		}
	}

	mbuf = rte_pktmbuf_alloc((&adap->sge.ctrlq[chan])->mb_pool);
	if (mbuf) {
		mbuf->data_len = sizeof(struct cpl_tid_release);
		mbuf->pkt_len = mbuf->data_len;
		mk_tid_release(mbuf, tid);
		t4_mgmt_tx(&adap->sge.ctrlq[chan], mbuf);
	}
}
/**
 * Insert a TID.
 */
void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
		      unsigned short family)
{
	t->tid_tab[tid] = data;
	if (t->hash_base && tid >= t->hash_base) {
		if (family == FILTER_TYPE_IPV4)
			rte_atomic32_inc(&t->hash_tids_in_use);
	} else {
		if (family == FILTER_TYPE_IPV4)
			rte_atomic32_inc(&t->tids_in_use);
	}

	rte_atomic32_inc(&t->conns_in_use);
}
/**
 * Free TID tables.
 */
static void tid_free(struct tid_info *t)
{
	if (t->tid_tab) {
		if (t->ftid_bmap)
			rte_bitmap_free(t->ftid_bmap);

		if (t->ftid_bmap_array)
			t4_os_free(t->ftid_bmap_array);

		t4_os_free(t->tid_tab);
	}

	memset(t, 0, sizeof(struct tid_info));
}
/**
 * Allocate and initialize the TID tables. Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	size_t size;
	unsigned int ftid_bmap_size;
	unsigned int natids = t->natids;
	unsigned int max_ftids = t->nftids;

	ftid_bmap_size = rte_bitmap_get_memory_footprint(t->nftids);
	size = t->ntids * sizeof(*t->tid_tab) +
		max_ftids * sizeof(*t->ftid_tab) +
		natids * sizeof(*t->atid_tab);

	t->tid_tab = t4_os_alloc(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->ftid_tab = (struct filter_entry *)&t->atid_tab[t->natids];
	t->ftid_bmap_array = t4_os_alloc(ftid_bmap_size);
	if (!t->ftid_bmap_array) {
		tid_free(t);
		return -ENOMEM;
	}

	t4_os_lock_init(&t->atid_lock);
	t4_os_lock_init(&t->ftid_lock);

	t->afree = NULL;
	t->atids_in_use = 0;
	rte_atomic32_init(&t->tids_in_use);
	rte_atomic32_set(&t->tids_in_use, 0);
	rte_atomic32_init(&t->conns_in_use);
	rte_atomic32_set(&t->conns_in_use, 0);

	/* Set up the free list for atid_tab. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}

	t->ftid_bmap = rte_bitmap_init(t->nftids, t->ftid_bmap_array,
				       ftid_bmap_size);
	if (!t->ftid_bmap) {
		tid_free(t);
		return -ENOMEM;
	}

	return 0;
}
static inline bool is_x_1g_port(const struct link_config *lc)
{
	return (lc->pcaps & FW_PORT_CAP32_SPEED_1G) != 0;
}
static inline bool is_x_10g_port(const struct link_config *lc)
{
	unsigned int speeds, high_speeds;

	speeds = V_FW_PORT_CAP32_SPEED(G_FW_PORT_CAP32_SPEED(lc->pcaps));
	high_speeds = speeds &
		      ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);

	return high_speeds != 0;
}
static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
			     unsigned int us, unsigned int cnt,
			     unsigned int size, unsigned int iqe_size)
{
	q->adapter = adap;
	cxgb4_set_rspq_intr_params(q, us, cnt);
	q->iqe_len = iqe_size;
	q->size = size;
}
int cxgbe_cfg_queue_count(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adap = pi->adapter;
	struct sge *s = &adap->sge;
	unsigned int max_queues = s->max_ethqsets / adap->params.nports;

	if ((eth_dev->data->nb_rx_queues < 1) ||
	    (eth_dev->data->nb_tx_queues < 1))
		return -EINVAL;

	if ((eth_dev->data->nb_rx_queues > max_queues) ||
	    (eth_dev->data->nb_tx_queues > max_queues))
		return -EINVAL;

	if (eth_dev->data->nb_rx_queues > pi->rss_size)
		return -EINVAL;

	/* We must configure RSS, since config has changed */
	pi->flags &= ~PORT_RSS_DONE;

	pi->n_rx_qsets = eth_dev->data->nb_rx_queues;
	pi->n_tx_qsets = eth_dev->data->nb_tx_queues;

	return 0;
}
void cxgbe_cfg_queues(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adap = pi->adapter;
	struct sge *s = &adap->sge;
	unsigned int i, nb_ports = 0, qidx = 0;
	unsigned int q_per_port = 0;

	if (!(adap->flags & CFG_QUEUES)) {
		for_each_port(adap, i) {
			struct port_info *tpi = adap2pinfo(adap, i);

			nb_ports += (is_x_10g_port(&tpi->link_cfg)) ||
				     is_x_1g_port(&tpi->link_cfg) ? 1 : 0;
		}

		/*
		 * Allocate up to one queue set per lcore for each
		 * 1G/10G port by default.
		 */
		if (nb_ports)
			q_per_port = (s->max_ethqsets -
				     (adap->params.nports - nb_ports)) /
				     nb_ports;

		if (q_per_port > rte_lcore_count())
			q_per_port = rte_lcore_count();

		for_each_port(adap, i) {
			struct port_info *pi = adap2pinfo(adap, i);

			pi->first_qset = qidx;

			/* Initially n_rx_qsets == n_tx_qsets */
			pi->n_rx_qsets = (is_x_10g_port(&pi->link_cfg) ||
					  is_x_1g_port(&pi->link_cfg)) ?
					  q_per_port : 1;
			pi->n_tx_qsets = pi->n_rx_qsets;

			if (pi->n_rx_qsets > pi->rss_size)
				pi->n_rx_qsets = pi->rss_size;

			qidx += pi->n_rx_qsets;
		}

		for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
			struct sge_eth_rxq *r = &s->ethrxq[i];

			init_rspq(adap, &r->rspq, 5, 32, 1024, 64);
			r->usembufs = 1;
			r->fl.size = (r->usembufs ? 1024 : 72);
		}

		for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
			s->ethtxq[i].q.size = 1024;

		init_rspq(adap, &adap->sge.fw_evtq, 0, 0, 1024, 64);
		adap->flags |= CFG_QUEUES;
	}
}
void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats)
{
	t4_get_port_stats_offset(pi->adapter, pi->tx_chan, stats,
				 &pi->stats_base);
}

void cxgbe_stats_reset(struct port_info *pi)
{
	t4_clr_port_stats(pi->adapter, pi->tx_chan);
}
static void setup_memwin(struct adapter *adap)
{
	u32 mem_win0_base;

	/* For T5, only relative offset inside the PCIe BAR is passed */
	mem_win0_base = MEMWIN0_BASE;

	/*
	 * Set up memory window for accessing adapter memory ranges. (Read
	 * back MA register to ensure that changes propagate before we attempt
	 * to use the new values.)
	 */
	t4_write_reg(adap,
		     PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
					 MEMWIN_NIC),
		     mem_win0_base | V_BIR(0) |
		     V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT));
	t4_read_reg(adap,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
					MEMWIN_NIC));
}
int cxgbe_init_rss(struct adapter *adap)
{
	unsigned int i;
	int err;

	err = t4_init_rss_mode(adap, adap->mbox);
	if (err)
		return err;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
		if (!pi->rss)
			return -ENOMEM;

		pi->rss_hf = CXGBE_RSS_HF_ALL;
	}
	return 0;
}
/**
 * Dump basic information about the adapter.
 */
void cxgbe_print_adapter_info(struct adapter *adap)
{
	/**
	 * Hardware/Firmware/etc. Version/Revision IDs.
	 */
	t4_dump_version_info(adap);
}
void cxgbe_print_port_info(struct adapter *adap)
{
	int i;
	char buf[80];
	struct rte_pci_addr *loc = &adap->pdev->addr;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);
		char *bufp = buf;

		if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
			bufp += sprintf(bufp, "100M/");
		if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
			bufp += sprintf(bufp, "1G/");
		if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
			bufp += sprintf(bufp, "10G/");
		if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
			bufp += sprintf(bufp, "25G/");
		if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
			bufp += sprintf(bufp, "40G/");
		if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
			bufp += sprintf(bufp, "50G/");
		if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
			bufp += sprintf(bufp, "100G/");
		if (bufp != buf)
			--bufp;
		sprintf(bufp, "BASE-%s",
			t4_get_port_type_description(
					(enum fw_port_type)pi->port_type));

		dev_info(adap,
			 " " PCI_PRI_FMT " Chelsio rev %d %s %s\n",
			 loc->domain, loc->bus, loc->devid, loc->function,
			 CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
			 (adap->flags & USING_MSIX) ? " MSI-X" :
			 (adap->flags & USING_MSI) ? " MSI" : "");
	}
}
static int check_devargs_handler(const char *key, const char *value, void *p)
{
	if (!strncmp(key, CXGBE_DEVARG_CMN_KEEP_OVLAN, strlen(key)) ||
	    !strncmp(key, CXGBE_DEVARG_CMN_TX_MODE_LATENCY, strlen(key)) ||
	    !strncmp(key, CXGBE_DEVARG_VF_FORCE_LINK_UP, strlen(key))) {
		if (!strncmp(value, "1", 1)) {
			bool *dst_val = (bool *)p;

			*dst_val = true;
		}
	}

	if (!strncmp(key, CXGBE_DEVARG_PF_FILTER_MODE, strlen(key)) ||
	    !strncmp(key, CXGBE_DEVARG_PF_FILTER_MASK, strlen(key))) {
		u32 *dst_val = (u32 *)p;
		char *endptr = NULL;
		u32 arg_val;

		errno = 0;
		arg_val = strtoul(value, &endptr, 16);
		if (errno || endptr == value)
			return -EINVAL;

		*dst_val = arg_val;
	}

	return 0;
}
static int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key,
			     void *p)
{
	struct rte_kvargs *kvlist;
	int ret = 0;

	if (!devargs)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (!kvlist)
		return 0;

	if (!rte_kvargs_count(kvlist, key))
		goto out;

	ret = rte_kvargs_process(kvlist, key, check_devargs_handler, p);

out:
	rte_kvargs_free(kvlist);
	return ret;
}
static void cxgbe_get_devargs_int(struct adapter *adap, bool *dst,
				  const char *key, bool default_value)
{
	struct rte_pci_device *pdev = adap->pdev;
	int ret;
	bool devarg_value = default_value;

	*dst = default_value;
	if (!pdev)
		return;

	ret = cxgbe_get_devargs(pdev->device.devargs, key, &devarg_value);
	if (ret)
		return;

	*dst = devarg_value;
}
static void cxgbe_get_devargs_u32(struct adapter *adap, u32 *dst,
				  const char *key, u32 default_value)
{
	struct rte_pci_device *pdev = adap->pdev;
	u32 devarg_value = default_value;
	int ret;

	*dst = default_value;
	if (!pdev)
		return;

	ret = cxgbe_get_devargs(pdev->device.devargs, key, &devarg_value);
	if (ret)
		return;

	*dst = devarg_value;
}
void cxgbe_process_devargs(struct adapter *adap)
{
	cxgbe_get_devargs_int(adap, &adap->devargs.keep_ovlan,
			      CXGBE_DEVARG_CMN_KEEP_OVLAN, false);
	cxgbe_get_devargs_int(adap, &adap->devargs.tx_mode_latency,
			      CXGBE_DEVARG_CMN_TX_MODE_LATENCY, false);
	cxgbe_get_devargs_int(adap, &adap->devargs.force_link_up,
			      CXGBE_DEVARG_VF_FORCE_LINK_UP, false);
	cxgbe_get_devargs_u32(adap, &adap->devargs.filtermode,
			      CXGBE_DEVARG_PF_FILTER_MODE, 0);
	cxgbe_get_devargs_u32(adap, &adap->devargs.filtermask,
			      CXGBE_DEVARG_PF_FILTER_MASK, 0);
}
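/*
 * Example EAL devargs (illustrative): enable outer-VLAN retention and
 * request a specific filter mode/mask on a PF. The PCI address and the
 * hex values are placeholders for CXGBE_DEVARGS_FILTER_MODE_* bitmasks,
 * parsed in hexadecimal by check_devargs_handler() above.
 *
 *	-a 02:00.4,keep_ovlan=1,filtermode=0x88,filtermask=0x88
 */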
static void configure_vlan_types(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		/* OVLAN Type 0x88a8 */
		t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN0),
				 V_OVLAN_MASK(M_OVLAN_MASK) |
				 V_OVLAN_ETYPE(M_OVLAN_ETYPE),
				 V_OVLAN_MASK(M_OVLAN_MASK) |
				 V_OVLAN_ETYPE(0x88a8));
		/* OVLAN Type 0x9100 */
		t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN1),
				 V_OVLAN_MASK(M_OVLAN_MASK) |
				 V_OVLAN_ETYPE(M_OVLAN_ETYPE),
				 V_OVLAN_MASK(M_OVLAN_MASK) |
				 V_OVLAN_ETYPE(0x9100));

		/* IVLAN Type 0x8100 */
		t4_set_reg_field(adapter, MPS_PORT_RX_IVLAN(i),
				 V_IVLAN_ETYPE(M_IVLAN_ETYPE),
				 V_IVLAN_ETYPE(0x8100));

		t4_set_reg_field(adapter, MPS_PORT_RX_CTL(i),
				 F_OVLAN_EN0 | F_OVLAN_EN1 |
				 F_IVLAN_EN,
				 F_OVLAN_EN0 | F_OVLAN_EN1 |
				 F_IVLAN_EN);
	}

	t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG, V_RM_OVLAN(1),
			       V_RM_OVLAN(!adapter->devargs.keep_ovlan));
}
static int cxgbe_get_filter_vnic_mode_from_devargs(u32 val)
{
	u32 vnic_mode;

	vnic_mode = val & (CXGBE_DEVARGS_FILTER_MODE_PF_VF |
			   CXGBE_DEVARGS_FILTER_MODE_VLAN_OUTER);
	if (vnic_mode) {
		switch (vnic_mode) {
		case CXGBE_DEVARGS_FILTER_MODE_VLAN_OUTER:
			return CXGBE_FILTER_VNIC_MODE_OVLAN;
		case CXGBE_DEVARGS_FILTER_MODE_PF_VF:
			return CXGBE_FILTER_VNIC_MODE_PFVF;
		default:
			return -EINVAL;
		}
	}

	return CXGBE_FILTER_VNIC_MODE_NONE;
}
static int cxgbe_get_filter_mode_from_devargs(u32 val, bool closest_match)
{
	int vnic_mode, fmode = 0;
	bool found = false;
	u8 i;

	if (val >= CXGBE_DEVARGS_FILTER_MODE_MAX) {
		pr_err("Unsupported flags set in filter mode. Must be < 0x%x\n",
		       CXGBE_DEVARGS_FILTER_MODE_MAX);
		return -ERANGE;
	}

	vnic_mode = cxgbe_get_filter_vnic_mode_from_devargs(val);
	if (vnic_mode < 0) {
		pr_err("Unsupported Vnic-mode, more than 1 Vnic-mode selected\n");
		return vnic_mode;
	}

	if (vnic_mode)
		fmode |= F_VNIC_ID;
	if (val & CXGBE_DEVARGS_FILTER_MODE_PHYSICAL_PORT)
		fmode |= F_PORT;
	if (val & CXGBE_DEVARGS_FILTER_MODE_ETHERNET_DSTMAC)
		fmode |= F_MACMATCH;
	if (val & CXGBE_DEVARGS_FILTER_MODE_ETHERNET_ETHTYPE)
		fmode |= F_ETHERTYPE;
	if (val & CXGBE_DEVARGS_FILTER_MODE_VLAN_INNER)
		fmode |= F_VLAN;
	if (val & CXGBE_DEVARGS_FILTER_MODE_IP_TOS)
		fmode |= F_TOS;
	if (val & CXGBE_DEVARGS_FILTER_MODE_IP_PROTOCOL)
		fmode |= F_PROTOCOL;

	for (i = 0; i < ARRAY_SIZE(cxgbe_filter_mode_features); i++) {
		if ((cxgbe_filter_mode_features[i] & fmode) == fmode) {
			found = true;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	return closest_match ? cxgbe_filter_mode_features[i] : fmode;
}
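/*
 * Worked example (illustrative): a devargs value selecting inner-VLAN
 * and IP protocol matching yields fmode = F_VLAN | F_PROTOCOL. The
 * feature table above is scanned for the first entry that is a superset
 * of fmode; with closest_match == true that full entry is returned (and
 * later programmed as the filter mode), while with closest_match ==
 * false the caller's exact fmode is returned (used for the filter mask).
 */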
static int configure_filter_mode_mask(struct adapter *adap)
{
	u32 params[2], val[2], nparams = 0;
	int ret;

	if (!adap->devargs.filtermode && !adap->devargs.filtermask)
		return 0;

	if (!adap->devargs.filtermode || !adap->devargs.filtermask) {
		pr_err("Unsupported, Provide both filtermode and filtermask devargs\n");
		return -EINVAL;
	}

	if (adap->devargs.filtermask & ~adap->devargs.filtermode) {
		pr_err("Unsupported, filtermask (0x%x) must be subset of filtermode (0x%x)\n",
		       adap->devargs.filtermask, adap->devargs.filtermode);
		return -EINVAL;
	}

	params[0] = CXGBE_FW_PARAM_DEV(FILTER) |
		    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK);

	ret = cxgbe_get_filter_mode_from_devargs(adap->devargs.filtermode,
						 true);
	if (ret < 0) {
		pr_err("Unsupported filtermode devargs combination:0x%x\n",
		       adap->devargs.filtermode);
		return ret;
	}

	val[0] = V_FW_PARAMS_PARAM_FILTER_MODE(ret);

	ret = cxgbe_get_filter_mode_from_devargs(adap->devargs.filtermask,
						 false);
	if (ret < 0) {
		pr_err("Unsupported filtermask devargs combination:0x%x\n",
		       adap->devargs.filtermask);
		return ret;
	}

	val[0] |= V_FW_PARAMS_PARAM_FILTER_MASK(ret);

	nparams++;

	ret = cxgbe_get_filter_vnic_mode_from_devargs(adap->devargs.filtermode);
	if (ret < 0)
		return ret;

	if (ret) {
		params[1] = CXGBE_FW_PARAM_DEV(FILTER) |
			    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_VNIC_MODE);
		val[1] = ret - 1;
		nparams++;
	}

	return t4_set_params(adap, adap->mbox, adap->pf, 0, nparams,
			     params, val);
}
static void configure_pcie_ext_tag(struct adapter *adapter)
{
	u16 v;
	int pos = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);

	if (!pos)
		return;

	if (pos > 0) {
		t4_os_pci_read_cfg2(adapter, pos + PCI_EXP_DEVCTL, &v);
		v |= PCI_EXP_DEVCTL_EXT_TAG;
		t4_os_pci_write_cfg2(adapter, pos + PCI_EXP_DEVCTL, v);
		if (is_t6(adapter->params.chip)) {
			t4_set_reg_field(adapter, A_PCIE_CFG2,
					 V_T6_TOTMAXTAG(M_T6_TOTMAXTAG),
					 V_T6_TOTMAXTAG(7));
			t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
					 V_T6_MINTAG(M_T6_MINTAG),
					 V_T6_MINTAG(8));
		} else {
			t4_set_reg_field(adapter, A_PCIE_CFG2,
					 V_TOTMAXTAG(M_TOTMAXTAG),
					 V_TOTMAXTAG(3));
			t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
					 V_MINTAG(M_MINTAG),
					 V_MINTAG(8));
		}
	}
}
/* Figure out how many Queue Sets we can support */
void cxgbe_configure_max_ethqsets(struct adapter *adapter)
{
	unsigned int ethqsets;

	/*
	 * We need to reserve an Ingress Queue for the Asynchronous Firmware
	 * Event Queue.
	 *
	 * For each Queue Set, we'll need the ability to allocate two Egress
	 * Contexts -- one for the Ingress Queue Free List and one for the TX
	 * Ethernet Queue.
	 */
	if (is_pf4(adapter)) {
		struct pf_resources *pfres = &adapter->params.pfres;

		ethqsets = pfres->niqflint - 1;
		if (pfres->neq < ethqsets * 2)
			ethqsets = pfres->neq / 2;
	} else {
		struct vf_resources *vfres = &adapter->params.vfres;

		ethqsets = vfres->niqflint - 1;
		if (vfres->nethctrl != ethqsets)
			ethqsets = min(vfres->nethctrl, ethqsets);
		if (vfres->neq < ethqsets * 2)
			ethqsets = vfres->neq / 2;
	}

	if (ethqsets > MAX_ETH_QSETS)
		ethqsets = MAX_ETH_QSETS;
	adapter->sge.max_ethqsets = ethqsets;
}
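/*
 * Worked example (illustrative): a PF provisioned with niqflint = 17
 * and neq = 20 first gets ethqsets = 17 - 1 = 16 from the Ingress Queue
 * limit; the Egress Context limit then caps it at 20 / 2 = 10 queue
 * sets.
 */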
/*
 * Tweak configuration based on system architecture, etc. Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization. So these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	u8 rx_dma_offset = 0;

	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc. The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params_compat(adapter, CXGBE_PAGE_SIZE, L1_CACHE_BYTES,
				    T5_LAST_REV);

	/*
	 * Keep the chip default offset to deliver Ingress packets into our
	 * DMA buffers to zero
	 */
	t4_set_reg_field(adapter, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT),
			 V_PKTSHIFT(rx_dma_offset));

	t4_set_reg_field(adapter, A_SGE_FLM_CFG,
			 V_CREDITCNT(M_CREDITCNT) | M_CREDITCNTPACKING,
			 V_CREDITCNT(3) | V_CREDITCNTPACKING(1));

	t4_set_reg_field(adapter, A_SGE_INGRESS_RX_THRESHOLD,
			 V_THRESHOLD_3(M_THRESHOLD_3), V_THRESHOLD_3(32U));

	t4_set_reg_field(adapter, A_SGE_CONTROL2, V_IDMAARBROUNDROBIN(1U),
			 V_IDMAARBROUNDROBIN(1U));

	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG,
			       F_CSUM_HAS_PSEUDO_HDR, 0);

	return 0;
}
/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
	u32 finiver, finicsum, cfcsum, param, val;
	struct fw_caps_config_cmd caps_cmd;
	unsigned long mtype = 0, maddr = 0;
	u8 config_issued = 0;
	char config_name[20];
	int cfg_addr, ret;

	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  F_PIORSTMODE | F_PIORST);
		if (ret < 0) {
			dev_warn(adapter, "Firmware reset failed, error %d\n",
				 -ret);
			goto bye;
		}
	}

	cfg_addr = t4_flash_cfg_addr(adapter);
	if (cfg_addr < 0) {
		ret = cfg_addr;
		dev_warn(adapter, "Finding address for firmware config file in flash failed, error %d\n",
			 -ret);
		goto bye;
	}

	strcpy(config_name, "On Flash");
	mtype = FW_MEMTYPE_CF_FLASH;
	maddr = cfg_addr;

	/* Enable HASH filter region when support is available. */
	val = 1;
	param = CXGBE_FW_PARAM_DEV(HASHFILTER_WITH_OFLD);
	t4_set_params(adapter, adapter->mbox, adapter->pf, 0, 1,
		      &param, &val);

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File. We don't use t4_fw_config_file()
	 * because we want the ability to modify various features after we've
	 * processed the configuration file ...
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
					   F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 =
		cpu_to_be32(F_FW_CAPS_CONFIG_CMD_CFVALID |
			    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
			    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
			    FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	/*
	 * If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the firmware. A
	 * very few early versions of the firmware didn't have one embedded
	 * but we can ignore those.
	 */
	if (ret == -ENOENT) {
		dev_info(adapter, "%s: Going for embedded config in firmware..\n",
			 __func__);

		memset(&caps_cmd, 0, sizeof(caps_cmd));
		caps_cmd.op_to_write =
			cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				    F_FW_CMD_REQUEST | F_FW_CMD_READ);
		caps_cmd.cfvalid_to_len16 = cpu_to_be32(FW_LEN16(caps_cmd));
		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
				 sizeof(caps_cmd), &caps_cmd);
		strcpy(config_name, "Firmware Default");
	}

	config_issued = 1;
	if (ret < 0)
		goto bye;

	finiver = be32_to_cpu(caps_cmd.finiver);
	finicsum = be32_to_cpu(caps_cmd.finicsum);
	cfcsum = be32_to_cpu(caps_cmd.cfcsum);
	if (finicsum != cfcsum)
		dev_warn(adapter, "Configuration File checksum mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);

	/*
	 * If we're a pure NIC driver then disable all offloading facilities.
	 * This will allow the firmware to optimize aspects of the hardware
	 * configuration which will result in improved performance.
	 */
	caps_cmd.niccaps &= cpu_to_be16(~FW_CAPS_CONFIG_NIC_ETHOFLD);
	caps_cmd.toecaps = 0;
	caps_cmd.iscsicaps = 0;
	caps_cmd.rdmacaps = 0;
	caps_cmd.fcoecaps = 0;
	caps_cmd.cryptocaps = 0;

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
					   F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0) {
		dev_warn(adapter, "Unable to finalize Firmware Capabilities %d\n",
			 -ret);
		goto bye;
	}

	/*
	 * Tweak configuration based on system architecture, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0) {
		dev_warn(adapter, "Unable to do init0-tweaks %d\n", -ret);
		goto bye;
	}

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0) {
		dev_warn(adapter, "Initializing Firmware failed, error %d\n",
			 -ret);
		goto bye;
	}

	/*
	 * Return successfully and note that we're operating with parameters
	 * not supplied by the driver, rather than from hard-wired
	 * initialization constants buried in the driver.
	 */
	dev_info(adapter,
		 "Successfully configured using Firmware Configuration File \"%s\", version %#x, computed checksum %#x\n",
		 config_name, finiver, cfcsum);

	return 0;

	/*
	 * Something bad happened. Return the error ... (If the "error"
	 * is that there's no Configuration File on the adapter we don't
	 * want to issue a warning since this is fairly common.)
	 */
bye:
	if (config_issued && ret != -ENOENT)
		dev_warn(adapter, "\"%s\" configuration file error %d\n",
			 config_name, -ret);

	dev_debug(adapter, "%s: returning ret = %d ..\n", __func__, ret);
	return ret;
}
static int adap_init0(struct adapter *adap)
{
	struct fw_caps_config_cmd caps_cmd;
	int ret = 0;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	int reset = 1;
	int mbox = adap->mbox;

	/*
	 * Contact FW, advertising Master capability.
	 */
	ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
	if (ret < 0) {
		dev_err(adap, "%s: could not connect to FW, error %d\n",
			__func__, -ret);
		goto bye;
	}

	CXGBE_DEBUG_MBOX(adap, "%s: adap->mbox = %d; ret = %d\n", __func__,
			 adap->mbox, ret);

	if (ret == mbox)
		adap->flags |= MASTER_PF;

	if (state == DEV_STATE_INIT) {
		/*
		 * Force halt and reset FW because a previous instance may have
		 * exited abnormally without properly shutting down
		 */
		ret = t4_fw_halt(adap, adap->mbox, reset);
		if (ret < 0) {
			dev_err(adap, "Failed to halt. Exit.\n");
			goto bye;
		}

		ret = t4_fw_restart(adap, adap->mbox, reset);
		if (ret < 0) {
			dev_err(adap, "Failed to restart. Exit.\n");
			goto bye;
		}
		state = (enum dev_state)((unsigned int)state & ~DEV_STATE_INIT);
	}

	t4_get_version_info(adap);

	ret = t4_get_core_clock(adap, &adap->params.vpd);
	if (ret < 0) {
		dev_err(adap, "%s: could not get core clock, error %d\n",
			__func__, -ret);
		goto bye;
	}

	/*
	 * If the firmware is initialized already (and we're not forcing a
	 * master initialization), note that we're living with existing
	 * adapter parameters. Otherwise, it's time to try initializing the
	 * adapter.
	 */
	if (state == DEV_STATE_INIT) {
		dev_info(adap, "Coming up as %s: Adapter already initialized\n",
			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
	} else {
		dev_info(adap, "Coming up as MASTER: Initializing adapter\n");

		ret = adap_init0_config(adap, reset);
		if (ret == -ENOENT) {
			dev_err(adap,
				"No Configuration File present on adapter. Using hard-wired configuration parameters.\n");
			goto bye;
		}
	}
	if (ret < 0) {
		dev_err(adap, "could not initialize adapter, error %d\n", -ret);
		goto bye;
	}

	/* Now that we've successfully configured and initialized the adapter
	 * (or found it already initialized), we can ask the Firmware what
	 * resources it has provisioned for us.
	 */
	ret = t4_get_pfres(adap);
	if (ret) {
		dev_err(adap,
			"Unable to retrieve resource provisioning info\n");
		goto bye;
	}

	/* Find out what ports are available to us. */
	v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
	if (ret < 0) {
		dev_err(adap, "%s: failure in t4_query_params; error = %d\n",
			__func__, ret);
		goto bye;
	}

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;

	dev_debug(adap, "%s: adap->params.nports = %u\n", __func__,
		  adap->params.nports);

	/*
	 * Give the SGE code a chance to pull in anything that it needs ...
	 * Note that this must be called after we retrieve our VPD parameters
	 * in order to know how to convert core ticks to seconds, etc.
	 */
	ret = t4_sge_init(adap);
	if (ret < 0) {
		dev_err(adap, "t4_sge_init failed with error %d\n",
			-ret);
		goto bye;
	}

	/*
	 * Grab some of our basic fundamental operating parameters.
	 */
	params[0] = CXGBE_FW_PARAM_PFVF(L2T_START);
	params[1] = CXGBE_FW_PARAM_PFVF(L2T_END);
	params[2] = CXGBE_FW_PARAM_PFVF(FILTER_START);
	params[3] = CXGBE_FW_PARAM_PFVF(FILTER_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 4, params, val);
	if (ret < 0)
		goto bye;
	adap->l2t_start = val[0];
	adap->l2t_end = val[1];
	adap->tids.ftid_base = val[2];
	adap->tids.nftids = val[3] - val[2] + 1;

	params[0] = CXGBE_FW_PARAM_PFVF(CLIP_START);
	params[1] = CXGBE_FW_PARAM_PFVF(CLIP_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->clipt_start = val[0];
	adap->clipt_end = val[1];

	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	if ((caps_cmd.niccaps & cpu_to_be16(FW_CAPS_CONFIG_NIC_HASHFILTER)) &&
	    is_t6(adap->params.chip)) {
		if (cxgbe_init_hash_filter(adap) < 0)
			goto bye;
	}

	/* See if FW supports FW_FILTER2 work request */
	if (is_t4(adap->params.chip)) {
		adap->params.filter2_wr_support = 0;
	} else {
		params[0] = CXGBE_FW_PARAM_DEV(FILTER2_WR);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      1, params, val);
		adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
	}

	/* Check if FW supports returning vin.
	 * If this is not supported, driver will interpret
	 * these values from viid.
	 */
	params[0] = CXGBE_FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
			      1, params, val);
	adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0);

	/* query tid-related parameters */
	params[0] = CXGBE_FW_PARAM_DEV(NTID);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
			      params, val);
	if (ret < 0)
		goto bye;
	adap->tids.ntids = val[0];
	adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);

	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages. Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	params[0] = CXGBE_FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val[0] = 1;
	(void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);

	/*
	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
	 * capability. Earlier versions of the firmware didn't have the
	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
	 * permission to use ULPTX MEMWRITE DSGL.
	 */
	if (is_t4(adap->params.chip)) {
		adap->params.ulptx_memwrite_dsgl = false;
	} else {
		params[0] = CXGBE_FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      1, params, val);
		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
	}

	/* Query for max number of packets that can be coalesced for Tx */
	params[0] = CXGBE_FW_PARAM_PFVF(MAX_PKTS_PER_ETH_TX_PKTS_WR);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
	if (!ret && val[0] > 0)
		adap->params.max_tx_coalesce_num = val[0];
	else
		adap->params.max_tx_coalesce_num = ETH_COALESCE_PKT_NUM;

	/*
	 * The MTU/MSS Table is initialized by now, so load their values. If
	 * we're initializing the adapter, then we'll make any modifications
	 * we want to the MTU/MSS Table and also initialize the congestion
	 * parameters.
	 */
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	if (state != DEV_STATE_INIT) {
		int i;

		/*
		 * The default MTU Table contains values 1492 and 1500.
		 * However, for TCP, it's better to have two values which are
		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
		 * This allows us to have a TCP Data Payload which is a
		 * multiple of 8 regardless of what combination of TCP Options
		 * are in use (always a multiple of 4 bytes) which is
		 * important for performance reasons. For instance, if no
		 * options are in use, then we have a 20-byte IP header and a
		 * 20-byte TCP header. In this case, a 1500-byte MSS would
		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
		 * which is not a multiple of 8. So using an MSS of 1488 in
		 * this case results in a TCP Data Payload of 1448 bytes which
		 * is a multiple of 8. On the other hand, if 12-byte TCP Time
		 * Stamps have been negotiated, then an MTU of 1500 bytes
		 * results in a TCP Data Payload of 1448 bytes which, as
		 * above, is a multiple of 8 bytes ...
		 */
		for (i = 0; i < NMTUS; i++)
			if (adap->params.mtus[i] == 1492) {
				adap->params.mtus[i] = 1488;
				break;
			}

		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
			     adap->params.b_wnd);
	}
	t4_init_sge_params(adap);
	ret = configure_filter_mode_mask(adap);
	if (ret < 0)
		goto bye;

	t4_init_tp_params(adap);
	configure_pcie_ext_tag(adap);
	configure_vlan_types(adap);
	cxgbe_configure_max_ethqsets(adap);

	adap->params.drv_memwin = MEMWIN_NIC;
	adap->flags |= FW_OK;
	dev_debug(adap, "%s: returning zero..\n", __func__);
	return 0;

	/*
	 * Something bad happened. If a command timed out or failed with EIO
	 * FW does not operate within its spec or something catastrophic
	 * happened to HW/FW, stop issuing commands.
	 */
bye:
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}
/**
 * t4_os_portmod_changed - handle port module changes
 * @adap: the adapter associated with the module change
 * @port_id: the port index whose module status has changed
 *
 * This is the OS-dependent handler for port module changes. It is
 * invoked when a port module is removed or inserted, to perform any
 * OS-specific processing.
 */
void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char * const mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct port_info *pi = adap2pinfo(adap, port_id);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		dev_info(adap, "Port%d: port module unplugged\n", pi->port_id);
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		dev_info(adap, "Port%d: %s port module inserted\n", pi->port_id,
			 mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		dev_info(adap, "Port%d: unsupported port module inserted\n",
			 pi->port_id);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		dev_info(adap, "Port%d: unknown port module inserted\n",
			 pi->port_id);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		dev_info(adap, "Port%d: transceiver module error\n",
			 pi->port_id);
	else
		dev_info(adap, "Port%d: unknown module type %d inserted\n",
			 pi->port_id, pi->mod_type);
}
bool cxgbe_force_linkup(struct adapter *adap)
{
	if (is_pf4(adap))
		return false;	/* force_linkup not required for pf driver */

	return adap->devargs.force_link_up;
}
/**
 * cxgbe_link_start - enable a port
 * @pi: the port to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
int cxgbe_link_start(struct port_info *pi)
{
	struct adapter *adapter = pi->adapter;
	u64 conf_offloads;
	unsigned int mtu;
	int ret;

	mtu = pi->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
	      (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);

	conf_offloads = pi->eth_dev->data->dev_conf.rxmode.offloads;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1, -1,
			    !!(conf_offloads & DEV_RX_OFFLOAD_VLAN_STRIP),
			    true);
	if (ret == 0) {
		ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt,
				(u8 *)&pi->eth_dev->data->mac_addrs[0]);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0 && is_pf4(adapter))
		ret = t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0) {
		/*
		 * Enabling a Virtual Interface can result in an interrupt
		 * during the processing of the VI Enable command and, in some
		 * paths, result in an attempt to issue another command in the
		 * interrupt context. Thus, we disable interrupts during the
		 * course of the VI Enable command ...
		 */
		ret = t4_enable_vi_params(adapter, adapter->mbox, pi->viid,
					  true, true, false);
	}

	if (ret == 0 && cxgbe_force_linkup(adapter))
		pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
	return ret;
}
/**
 * cxgbe_write_rss_conf - write the RSS configuration for a given port
 * @pi: the port
 * @rss_hf: Hash configuration to apply
 */
int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
{
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;
	u64 flags = 0;
	u16 rss;
	int err;

	/*  Should never be called before setting up sge eth rx queues */
	if (!(adapter->flags & FULL_INIT_DONE)) {
		dev_err(adapter, "%s No RXQs available on port %d\n",
			__func__, pi->port_id);
		return -EINVAL;
	}

	/* Don't allow unsupported hash functions */
	if (rss_hf & ~CXGBE_RSS_HF_ALL)
		return -EINVAL;

	if (rss_hf & CXGBE_RSS_HF_IPV4_MASK)
		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;

	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;

	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
		flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
			 F_FW_RSS_VI_CONFIG_CMD_UDPEN;

	if (rss_hf & CXGBE_RSS_HF_IPV6_MASK)
		flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;

	if (rss_hf & CXGBE_RSS_HF_TCP_IPV6_MASK)
		flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
			 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;

	if (rss_hf & CXGBE_RSS_HF_UDP_IPV6_MASK)
		flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
			 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
			 F_FW_RSS_VI_CONFIG_CMD_UDPEN;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = rxq[0].rspq.abs_id;

	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed. We'll
	 * use our first ingress queue ...
	 */
	err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
			       flags, rss);
	return err;
}
/**
 * cxgbe_write_rss - write the RSS table for a given port
 * @pi: the port
 * @queues: array of queue indices for RSS
 *
 * Sets up the portion of the HW RSS table for the port's VI to distribute
 * packets to the Rx queues in @queues.
 */
int cxgbe_write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;

	/*  Should never be called before setting up sge eth rx queues */
	BUG_ON(!(adapter->flags & FULL_INIT_DONE));

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	rte_free(rss);
	return err;
}
/**
 * cxgbe_setup_rss - configure RSS
 * @pi: the port
 *
 * Sets up RSS to distribute packets to multiple receive queues. We
 * configure the RSS CPU lookup table to distribute to the number of HW
 * receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each port.
 * We always configure the RSS mapping for all ports since the mapping
 * table has plenty of entries.
 */
int cxgbe_setup_rss(struct port_info *pi)
{
	int j, err;
	struct adapter *adapter = pi->adapter;

	dev_debug(adapter, "%s: pi->rss_size = %u; pi->n_rx_qsets = %u\n",
		  __func__, pi->rss_size, pi->n_rx_qsets);

	if (!(pi->flags & PORT_RSS_DONE)) {
		if (adapter->flags & FULL_INIT_DONE) {
			/* Fill default values with equal distribution */
			for (j = 0; j < pi->rss_size; j++)
				pi->rss[j] = j % pi->n_rx_qsets;

			err = cxgbe_write_rss(pi, pi->rss);
			if (err)
				return err;

			err = cxgbe_write_rss_conf(pi, pi->rss_hf);
			if (err)
				return err;
			pi->flags |= PORT_RSS_DONE;
		}
	}

	return 0;
}
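/*
 * Example (illustrative): with rss_size = 8 and n_rx_qsets = 3, the
 * default table built above is {0, 1, 2, 0, 1, 2, 0, 1}, i.e. ingress
 * hash buckets round-robin across the three configured Rx queue sets.
 */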
/*
 * Enable interrupt generation for an Rx response queue.
 */
static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
	/* 0-increment GTS to start the timer and enable interrupts */
	t4_write_reg(adap, is_pf4(adap) ? MYPF_REG(A_SGE_PF_GTS) :
					  T4VF_SGE_BASE_ADDR + A_SGE_VF_GTS,
		     V_SEINTARM(q->intr_params) |
		     V_INGRESSQID(q->cntxt_id));
}
void cxgbe_enable_rx_queues(struct port_info *pi)
{
	struct adapter *adap = pi->adapter;
	struct sge *s = &adap->sge;
	unsigned int i;

	for (i = 0; i < pi->n_rx_qsets; i++)
		enable_rx(adap, &s->ethrxq[pi->first_qset + i].rspq);
}
/**
 * fw_caps_to_speed_caps - translate Firmware Port Caps to Speed Caps.
 * @port_type: Firmware Port Type
 * @fw_caps: Firmware Port Capabilities
 * @speed_caps: Device Info Speed Capabilities
 *
 * Translate a Firmware Port Capabilities specification to Device Info
 * Speed Capabilities.
 */
static void fw_caps_to_speed_caps(enum fw_port_type port_type,
				  unsigned int fw_caps,
				  u32 *speed_caps)
{
#define SET_SPEED(__speed_name) \
	do { \
		*speed_caps |= ETH_LINK_ ## __speed_name; \
	} while (0)

#define FW_CAPS_TO_SPEED(__fw_name) \
	do { \
		if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
			SET_SPEED(__fw_name); \
	} while (0)

	switch (port_type) {
	case FW_PORT_TYPE_BT_SGMII:
	case FW_PORT_TYPE_BT_XFI:
	case FW_PORT_TYPE_BT_XAUI:
		FW_CAPS_TO_SPEED(SPEED_100M);
		FW_CAPS_TO_SPEED(SPEED_1G);
		FW_CAPS_TO_SPEED(SPEED_10G);
		break;

	case FW_PORT_TYPE_KX4:
	case FW_PORT_TYPE_KX:
	case FW_PORT_TYPE_FIBER_XFI:
	case FW_PORT_TYPE_FIBER_XAUI:
	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_QSFP_10G:
	case FW_PORT_TYPE_QSA:
		FW_CAPS_TO_SPEED(SPEED_1G);
		FW_CAPS_TO_SPEED(SPEED_10G);
		break;

	case FW_PORT_TYPE_KR:
		SET_SPEED(SPEED_10G);
		break;

	case FW_PORT_TYPE_BP_AP:
	case FW_PORT_TYPE_BP4_AP:
		SET_SPEED(SPEED_1G);
		SET_SPEED(SPEED_10G);
		break;

	case FW_PORT_TYPE_BP40_BA:
	case FW_PORT_TYPE_QSFP:
		SET_SPEED(SPEED_40G);
		break;

	case FW_PORT_TYPE_CR_QSFP:
	case FW_PORT_TYPE_SFP28:
	case FW_PORT_TYPE_KR_SFP28:
		FW_CAPS_TO_SPEED(SPEED_1G);
		FW_CAPS_TO_SPEED(SPEED_10G);
		FW_CAPS_TO_SPEED(SPEED_25G);
		break;

	case FW_PORT_TYPE_CR2_QSFP:
		SET_SPEED(SPEED_50G);
		break;

	case FW_PORT_TYPE_KR4_100G:
	case FW_PORT_TYPE_CR4_QSFP:
		FW_CAPS_TO_SPEED(SPEED_25G);
		FW_CAPS_TO_SPEED(SPEED_40G);
		FW_CAPS_TO_SPEED(SPEED_50G);
		FW_CAPS_TO_SPEED(SPEED_100G);
		break;

	default:
		break;
	}

#undef FW_CAPS_TO_SPEED
#undef SET_SPEED
}
/**
 * cxgbe_get_speed_caps - Fetch supported speed capabilities
 * @pi: Underlying port's info
 * @speed_caps: Device Info speed capabilities
 *
 * Fetch supported speed capabilities of the underlying port.
 */
void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
{
	*speed_caps = 0;

	fw_caps_to_speed_caps(pi->port_type, pi->link_cfg.pcaps,
			      speed_caps);

	if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG))
		*speed_caps |= ETH_LINK_SPEED_FIXED;
}
/**
 * cxgbe_set_link_status - Set device link up or down.
 * @pi: Underlying port's info
 * @status: 0 - down, 1 - up
 *
 * Set the device link up or down.
 */
int cxgbe_set_link_status(struct port_info *pi, bool status)
{
	struct adapter *adapter = pi->adapter;
	int err = 0;

	err = t4_enable_vi(adapter, adapter->mbox, pi->viid, status, status);
	if (err) {
		dev_err(adapter, "%s: t4_enable_vi failed: %d\n", __func__, err);
		return err;
	}

	if (!status)
		t4_reset_link_config(adapter, pi->pidx);

	return 0;
}
/**
 * cxgbe_up - enable the adapter
 * @adap: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 */
int cxgbe_up(struct adapter *adap)
{
	enable_rx(adap, &adap->sge.fw_evtq);
	t4_sge_tx_monitor_start(adap);
	if (is_pf4(adap))
		t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;

	/* TODO: deadman watchdog ?? */
	return 0;
}
/*
 * Close the port
 */
int cxgbe_down(struct port_info *pi)
{
	return cxgbe_set_link_status(pi, false);
}
/*
 * Release resources when all the ports have been stopped.
 */
void cxgbe_close(struct adapter *adapter)
{
	struct port_info *pi;
	int i;

	if (adapter->flags & FULL_INIT_DONE) {
		tid_free(&adapter->tids);
		t4_cleanup_mpstcam(adapter);
		t4_cleanup_clip_tbl(adapter);
		t4_cleanup_l2t(adapter);
		t4_cleanup_smt(adapter);
		if (is_pf4(adapter))
			t4_intr_disable(adapter);
		t4_sge_tx_monitor_stop(adapter);
		t4_free_sge_resources(adapter);
		for_each_port(adapter, i) {
			pi = adap2pinfo(adapter, i);
			if (pi->viid != 0)
				t4_free_vi(adapter, adapter->mbox,
					   adapter->pf, 0, pi->viid);
			rte_eth_dev_release_port(pi->eth_dev);
		}
		adapter->flags &= ~FULL_INIT_DONE;
	}

	if (is_pf4(adapter) && (adapter->flags & FW_OK))
		t4_fw_bye(adapter, adapter->mbox);
}
static void adap_smt_index(struct adapter *adapter, u32 *smt_start_idx,
			   u32 *smt_size)
{
	u32 params[2], smt_val[2];
	int ret;

	params[0] = CXGBE_FW_PARAM_PFVF(GET_SMT_START);
	params[1] = CXGBE_FW_PARAM_PFVF(GET_SMT_SIZE);

	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
			      2, params, smt_val);

	/* if FW doesn't recognize this command then set it to default setting
	 * which is start index as 0 and size as 256.
	 */
	if (ret < 0) {
		*smt_start_idx = 0;
		*smt_size = SMT_SIZE;
	} else {
		*smt_start_idx = smt_val[0];
		/* smt size can be zero, if nsmt is not yet configured in
		 * the config file or set as zero, then configure all the
		 * remaining entries to this PF itself.
		 */
		if (!smt_val[1])
			*smt_size = SMT_SIZE - *smt_start_idx;
		else
			*smt_size = smt_val[1];
	}
}
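/*
 * Worked example (illustrative): if the query succeeds with
 * smt_val = {128, 0}, this PF's SMT slice starts at index 128 and,
 * since the reported size is zero, spans the rest of the table:
 * SMT_SIZE - 128 entries.
 */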
int cxgbe_probe(struct adapter *adapter)
{
	u32 smt_start_idx, smt_size;
	struct port_info *pi;
	int chip;
	int func, i;
	int err = 0;
	u32 whoami;

	whoami = t4_read_reg(adapter, A_PL_WHOAMI);
	chip = t4_get_chip_type(adapter,
			CHELSIO_PCI_ID_VER(adapter->pdev->id.device_id));
	if (chip < 0)
		return chip;

	func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
	       G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);

	adapter->mbox = func;
	adapter->pf = func;

	t4_os_lock_init(&adapter->mbox_lock);
	TAILQ_INIT(&adapter->mbox_list);
	t4_os_lock_init(&adapter->win0_lock);

	err = t4_prep_adapter(adapter);
	if (err)
		return err;

	setup_memwin(adapter);
	err = adap_init0(adapter);
	if (err) {
		dev_err(adapter, "%s: Adapter initialization failed, error %d\n",
			__func__, err);
		goto out_free;
	}

	if (!is_t4(adapter->params.chip)) {
		/*
		 * The userspace doorbell BAR is split evenly into doorbell
		 * regions, each associated with an egress queue. If this
		 * per-queue region is large enough (at least UDBS_SEG_SIZE)
		 * then it can be used to submit a tx work request with an
		 * implied doorbell. Enable write combining on the BAR if
		 * there is room for such work requests.
		 */
		int s_qpp, qpp, num_seg;

		s_qpp = (S_QUEUESPERPAGEPF0 +
			(S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) *
			adapter->pf);
		qpp = 1 << ((t4_read_reg(adapter,
				A_SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp)
				& M_QUEUESPERPAGEPF0);
		num_seg = CXGBE_PAGE_SIZE / UDBS_SEG_SIZE;
		if (qpp > num_seg)
			dev_warn(adapter, "Incorrect SGE EGRESS QUEUES_PER_PAGE configuration, continuing in debug mode\n");

		adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr;
		if (!adapter->bar2) {
			dev_err(adapter, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free;
		}
		t4_write_reg(adapter, A_SGE_STAT_CFG, V_STATSOURCE_T5(7) |
			     V_STATMODE(0));
	}

	for_each_port(adapter, i) {
		const unsigned int numa_node = rte_socket_id();
		char name[RTE_ETH_NAME_MAX_LEN];
		struct rte_eth_dev *eth_dev;

		snprintf(name, sizeof(name), "%s_%d",
			 adapter->pdev->device.name, i);

		if (i == 0) {
			/* First port is already allocated by DPDK */
			eth_dev = adapter->eth_dev;
			goto allocate_mac;
		}

		/*
		 * now do all data allocation - for eth_dev structure,
		 * and internal (private) data for the remaining ports
		 */

		/* reserve an ethdev entry */
		eth_dev = rte_eth_dev_allocate(name);
		if (!eth_dev)
			goto out_free;

		eth_dev->data->dev_private =
			rte_zmalloc_socket(name, sizeof(struct port_info),
					   RTE_CACHE_LINE_SIZE, numa_node);
		if (!eth_dev->data->dev_private)
			goto out_free;

allocate_mac:
		pi = eth_dev->data->dev_private;
		adapter->port[i] = pi;
		pi->eth_dev = eth_dev;
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		pi->pidx = i;

		pi->eth_dev->device = &adapter->pdev->device;
		pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
		pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
		pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;

		rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);

		pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
							RTE_ETHER_ADDR_LEN, 0);
		if (!pi->eth_dev->data->mac_addrs) {
			dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n",
				__func__);
			err = -ENOMEM;
			goto out_free;
		}

		if (i > 0) {
			/* First port will be notified by upper layer */
			rte_eth_dev_probing_finish(eth_dev);
		}
	}

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, adapter->mbox, adapter->pf, 0);
		if (err) {
			dev_err(adapter, "%s: t4_port_init failed with err %d\n",
				__func__, err);
			goto out_free;
		}
	}

	cxgbe_cfg_queues(adapter->eth_dev);

	cxgbe_print_adapter_info(adapter);
	cxgbe_print_port_info(adapter);

	adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
					  adapter->clipt_end);
	if (!adapter->clipt) {
		/* We tolerate a lack of clip_table, giving up some
		 * functionality
		 */
		dev_warn(adapter, "could not allocate CLIP. Continuing\n");
	}

	adap_smt_index(adapter, &smt_start_idx, &smt_size);
	adapter->smt = t4_init_smt(smt_start_idx, smt_size);
	if (!adapter->smt)
		dev_warn(adapter, "could not allocate SMT, continuing\n");

	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(adapter, "could not allocate L2T. Continuing\n");
	}

	if (tid_init(&adapter->tids) < 0) {
		/* Disable filtering support */
		dev_warn(adapter, "could not allocate TID table, "
			 "filter support disabled. Continuing\n");
	}

	t4_os_lock_init(&adapter->flow_lock);

	adapter->mpstcam = t4_init_mpstcam(adapter);
	if (!adapter->mpstcam)
		dev_warn(adapter,
			 "could not allocate mps tcam table. Continuing\n");

	if (is_hashfilter(adapter)) {
		if (t4_read_reg(adapter, A_LE_DB_CONFIG) & F_HASHEN) {
			u32 hash_base, hash_reg;

			hash_reg = A_LE_DB_TID_HASHBASE;
			hash_base = t4_read_reg(adapter, hash_reg);
			adapter->tids.hash_base = hash_base / 4;
		}
	} else {
		/* Disable hash filtering support */
		dev_warn(adapter,
			 "Maskless filter support disabled. Continuing\n");
	}

	err = cxgbe_init_rss(adapter);
	if (err)
		goto out_free;

	return 0;

out_free:
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		if (pi->viid != 0)
			t4_free_vi(adapter, adapter->mbox, adapter->pf,
				   0, pi->viid);
		rte_eth_dev_release_port(pi->eth_dev);
	}

	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->mbox);
	return -err;
}