1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2018 Chelsio Communications.
14 #include <netinet/in.h>
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
18 #include <rte_cycles.h>
19 #include <rte_interrupts.h>
21 #include <rte_debug.h>
23 #include <rte_atomic.h>
24 #include <rte_branch_prediction.h>
25 #include <rte_memory.h>
26 #include <rte_tailq.h>
28 #include <rte_alarm.h>
29 #include <rte_ether.h>
30 #include <ethdev_driver.h>
31 #include <ethdev_pci.h>
32 #include <rte_random.h>
34 #include <rte_kvargs.h>
36 #include "base/common.h"
37 #include "base/t4_regs.h"
38 #include "base/t4_msg.h"
40 #include "cxgbe_pfvf.h"
46 static const u16 cxgbe_filter_mode_features[] = {
47 (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE |
49 (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE |
51 (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_TOS |
53 (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_TOS |
55 (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_PORT |
57 (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_TOS |
59 (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_VLAN |
61 (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_VNIC_ID |
63 (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_TOS | F_VLAN |
65 (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_TOS | F_VNIC_ID |
67 (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_VLAN | F_PORT |
69 (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_VNIC_ID | F_PORT |
71 (F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_PROTOCOL | F_TOS |
73 (F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_VLAN | F_PORT),
74 (F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_VLAN | F_FCOE),
75 (F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_VNIC_ID | F_PORT),
76 (F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_VNIC_ID | F_FCOE),
77 (F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VLAN | F_PORT),
78 (F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VLAN | F_FCOE),
79 (F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VNIC_ID |
81 (F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VNIC_ID |
83 (F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_VLAN | F_PORT |
85 (F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_VNIC_ID | F_PORT |
87 (F_FRAGMENTATION | F_MPSHITTYPE | F_TOS | F_VLAN | F_PORT | F_FCOE),
88 (F_FRAGMENTATION | F_MPSHITTYPE | F_TOS | F_VNIC_ID | F_PORT | F_FCOE),
89 (F_FRAGMENTATION | F_MPSHITTYPE | F_VLAN | F_VNIC_ID | F_FCOE),
90 (F_FRAGMENTATION | F_MACMATCH | F_ETHERTYPE | F_PROTOCOL | F_PORT |
92 (F_FRAGMENTATION | F_MACMATCH | F_ETHERTYPE | F_TOS | F_PORT | F_FCOE),
93 (F_FRAGMENTATION | F_MACMATCH | F_PROTOCOL | F_VLAN | F_PORT | F_FCOE),
94 (F_FRAGMENTATION | F_MACMATCH | F_PROTOCOL | F_VNIC_ID | F_PORT |
96 (F_FRAGMENTATION | F_MACMATCH | F_TOS | F_VLAN | F_PORT | F_FCOE),
97 (F_FRAGMENTATION | F_MACMATCH | F_TOS | F_VNIC_ID | F_PORT | F_FCOE),
98 (F_FRAGMENTATION | F_ETHERTYPE | F_VLAN | F_PORT | F_FCOE),
99 (F_FRAGMENTATION | F_ETHERTYPE | F_VNIC_ID | F_PORT | F_FCOE),
100 (F_FRAGMENTATION | F_PROTOCOL | F_TOS | F_VLAN | F_FCOE),
101 (F_FRAGMENTATION | F_PROTOCOL | F_TOS | F_VNIC_ID | F_FCOE),
102 (F_FRAGMENTATION | F_VLAN | F_VNIC_ID | F_PORT | F_FCOE),
103 (F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_PROTOCOL | F_PORT |
105 (F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_TOS | F_PORT | F_FCOE),
106 (F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_VLAN | F_PORT),
107 (F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_VNIC_ID | F_PORT),
108 (F_MPSHITTYPE | F_MACMATCH | F_TOS | F_VLAN | F_PORT),
109 (F_MPSHITTYPE | F_MACMATCH | F_TOS | F_VNIC_ID | F_PORT),
110 (F_MPSHITTYPE | F_ETHERTYPE | F_VLAN | F_PORT | F_FCOE),
111 (F_MPSHITTYPE | F_ETHERTYPE | F_VNIC_ID | F_PORT | F_FCOE),
112 (F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VLAN | F_PORT | F_FCOE),
113 (F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VNIC_ID | F_PORT | F_FCOE),
114 (F_MPSHITTYPE | F_VLAN | F_VNIC_ID | F_PORT),
118 * Allocate a chunk of memory. The allocated memory is cleared.
120 void *t4_alloc_mem(size_t size)
122 return rte_zmalloc(NULL, size, 0);
126 * Free memory allocated through t4_alloc_mem().
128 void t4_free_mem(void *addr)
134 * Response queue handler for the FW event queue.
136 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
137 __rte_unused const struct pkt_gl *gl)
139 u8 opcode = ((const struct rss_header *)rsp)->opcode;
141 rsp++; /* skip RSS header */
144 * FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
146 if (unlikely(opcode == CPL_FW4_MSG &&
147 ((const struct cpl_fw4_msg *)rsp)->type ==
150 opcode = ((const struct rss_header *)rsp)->opcode;
152 if (opcode != CPL_SGE_EGR_UPDATE) {
153 dev_err(q->adapter, "unexpected FW4/CPL %#x on FW event queue\n",
159 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
161 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
162 const struct cpl_fw6_msg *msg = (const void *)rsp;
164 t4_handle_fw_rpl(q->adapter, msg->data);
165 } else if (opcode == CPL_ABORT_RPL_RSS) {
166 const struct cpl_abort_rpl_rss *p = (const void *)rsp;
168 cxgbe_hash_del_filter_rpl(q->adapter, p);
169 } else if (opcode == CPL_SET_TCB_RPL) {
170 const struct cpl_set_tcb_rpl *p = (const void *)rsp;
172 cxgbe_filter_rpl(q->adapter, p);
173 } else if (opcode == CPL_ACT_OPEN_RPL) {
174 const struct cpl_act_open_rpl *p = (const void *)rsp;
176 cxgbe_hash_filter_rpl(q->adapter, p);
177 } else if (opcode == CPL_L2T_WRITE_RPL) {
178 const struct cpl_l2t_write_rpl *p = (const void *)rsp;
180 cxgbe_do_l2t_write_rpl(q->adapter, p);
181 } else if (opcode == CPL_SMT_WRITE_RPL) {
182 const struct cpl_smt_write_rpl *p = (const void *)rsp;
184 cxgbe_do_smt_write_rpl(q->adapter, p);
186 dev_err(q->adapter, "unexpected CPL %#x on FW event queue\n",
194 * Set up SGE control queues to pass control information.
196 int cxgbe_setup_sge_ctrl_txq(struct adapter *adapter)
198 struct sge *s = &adapter->sge;
201 for_each_port(adapter, i) {
202 struct port_info *pi = adap2pinfo(adapter, i);
203 char name[RTE_ETH_NAME_MAX_LEN];
204 struct sge_ctrl_txq *q = &s->ctrlq[i];
207 err = t4_sge_alloc_ctrl_txq(adapter, q,
212 dev_err(adapter, "Failed to alloc ctrl txq. Err: %d",
216 snprintf(name, sizeof(name), "%s_ctrl_pool_%d",
217 pi->eth_dev->device->driver->name,
218 pi->eth_dev->data->port_id);
219 q->mb_pool = rte_pktmbuf_pool_create(name, s->ctrlq[i].q.size,
222 RTE_MBUF_DEFAULT_BUF_SIZE,
227 "Can't create ctrl pool for port %d. Err: %d\n",
228 pi->eth_dev->data->port_id, err);
234 t4_free_sge_resources(adapter);
239 * cxgbe_poll_for_completion - Poll rxq for completion
241 * @ms: milliseconds to delay
242 * @cnt: number of times to poll
243 * @c: completion to check for 'done' status
245 * Polls the rxq for replies until completion is done or the count
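*
* Hypothetical usage sketch (names illustrative only): the caller zeroes a
* struct t4_completion, posts a request whose reply handler marks it done
* under c->lock, and then waits here:
*
*	struct t4_completion c;
*
*	memset(&c, 0, sizeof(c));
*	t4_os_lock_init(&c.lock);
*	... post request that completes 'c' from the reply path ...
*	if (cxgbe_poll_for_completion(q, 10, 100, &c))
*		... handle timeout ...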
248 int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int ms,
249 unsigned int cnt, struct t4_completion *c)
252 unsigned int work_done, budget = 32;
257 for (i = 0; i < cnt; i++) {
258 cxgbe_poll(q, NULL, budget, &work_done);
259 t4_os_lock(&c->lock);
261 t4_os_unlock(&c->lock);
264 t4_os_unlock(&c->lock);
270 int cxgbe_setup_sge_fwevtq(struct adapter *adapter)
272 struct sge *s = &adapter->sge;
276 err = t4_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->eth_dev,
277 msi_idx, NULL, fwevtq_handler, -1, NULL, 0,
282 static int closest_timer(const struct sge *s, int time)
284 unsigned int i, match = 0;
285 int delta, min_delta = INT_MAX;
287 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
288 delta = time - s->timer_val[i];
291 if (delta < min_delta) {
299 static int closest_thres(const struct sge *s, int thres)
301 unsigned int i, match = 0;
302 int delta, min_delta = INT_MAX;
304 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
305 delta = thres - s->counter_val[i];
308 if (delta < min_delta) {
317 * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
319 * @us: the hold-off time in us, or 0 to disable timer
320 * @cnt: the hold-off packet count, or 0 to disable counter
322 * Sets an Rx queue's interrupt hold-off time and packet count. At least
323 * one of the two needs to be enabled for the queue to generate interrupts.
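*
* Illustrative example (values are arbitrary): arm a queue to interrupt
* after 5us of hold-off time or 8 accumulated packets, whichever comes
* first:
*
*	cxgb4_set_rspq_intr_params(&rxq->rspq, 5, 8);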
325 int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
328 struct adapter *adap = q->adapter;
329 unsigned int timer_val;
335 new_idx = closest_thres(&adap->sge, cnt);
336 if (q->desc && q->pktcnt_idx != new_idx) {
337 /* the queue has already been created, update it */
338 v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
340 FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
341 V_FW_PARAMS_PARAM_YZ(q->cntxt_id);
342 err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
347 q->pktcnt_idx = new_idx;
350 timer_val = (us == 0) ? X_TIMERREG_RESTART_COUNTER :
351 closest_timer(&adap->sge, us);
354 q->intr_params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX);
356 q->intr_params = V_QINTR_TIMER_IDX(timer_val) |
357 V_QINTR_CNT_EN(cnt > 0);
362 * Allocate an active-open TID and set it to the supplied value.
364 int cxgbe_alloc_atid(struct tid_info *t, void *data)
368 t4_os_lock(&t->atid_lock);
370 union aopen_entry *p = t->afree;
372 atid = p - t->atid_tab;
377 t4_os_unlock(&t->atid_lock);
382 * Release an active-open TID.
384 void cxgbe_free_atid(struct tid_info *t, unsigned int atid)
386 union aopen_entry *p = &t->atid_tab[atid];
388 t4_os_lock(&t->atid_lock);
392 t4_os_unlock(&t->atid_lock);
396 * Populate a TID_RELEASE WR. Caller must properly size the mbuf.
398 static void mk_tid_release(struct rte_mbuf *mbuf, unsigned int tid)
400 struct cpl_tid_release *req;
402 req = rte_pktmbuf_mtod(mbuf, struct cpl_tid_release *);
403 INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
407 * Release a TID and inform HW. If we are unable to allocate the release
408 * message we defer to a work queue.
410 void cxgbe_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
411 unsigned short family)
413 struct rte_mbuf *mbuf;
414 struct adapter *adap = container_of(t, struct adapter, tids);
416 WARN_ON(tid >= t->ntids);
418 if (t->tid_tab[tid]) {
419 t->tid_tab[tid] = NULL;
420 rte_atomic32_dec(&t->conns_in_use);
421 if (t->hash_base && tid >= t->hash_base) {
422 if (family == FILTER_TYPE_IPV4)
423 rte_atomic32_dec(&t->hash_tids_in_use);
425 if (family == FILTER_TYPE_IPV4)
426 rte_atomic32_dec(&t->tids_in_use);
430 mbuf = rte_pktmbuf_alloc((&adap->sge.ctrlq[chan])->mb_pool);
432 mbuf->data_len = sizeof(struct cpl_tid_release);
433 mbuf->pkt_len = mbuf->data_len;
434 mk_tid_release(mbuf, tid);
435 t4_mgmt_tx(&adap->sge.ctrlq[chan], mbuf);
442 void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
443 unsigned short family)
445 t->tid_tab[tid] = data;
446 if (t->hash_base && tid >= t->hash_base) {
447 if (family == FILTER_TYPE_IPV4)
448 rte_atomic32_inc(&t->hash_tids_in_use);
450 if (family == FILTER_TYPE_IPV4)
451 rte_atomic32_inc(&t->tids_in_use);
454 rte_atomic32_inc(&t->conns_in_use);
460 static void tid_free(struct tid_info *t)
464 rte_bitmap_free(t->ftid_bmap);
466 if (t->ftid_bmap_array)
467 t4_os_free(t->ftid_bmap_array);
469 t4_os_free(t->tid_tab);
472 memset(t, 0, sizeof(struct tid_info));
476 * Allocate and initialize the TID tables. Returns 0 on success.
478 static int tid_init(struct tid_info *t)
481 unsigned int ftid_bmap_size;
482 unsigned int natids = t->natids;
483 unsigned int max_ftids = t->nftids;
485 ftid_bmap_size = rte_bitmap_get_memory_footprint(t->nftids);
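/* A single allocation is carved into the connection TID table, the
 * active-open (atid) table and the filter (ftid) table below.
 */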
486 size = t->ntids * sizeof(*t->tid_tab) +
487 max_ftids * sizeof(*t->ftid_tab) +
488 natids * sizeof(*t->atid_tab);
490 t->tid_tab = t4_os_alloc(size);
494 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
495 t->ftid_tab = (struct filter_entry *)&t->atid_tab[t->natids];
496 t->ftid_bmap_array = t4_os_alloc(ftid_bmap_size);
497 if (!t->ftid_bmap_array) {
502 t4_os_lock_init(&t->atid_lock);
503 t4_os_lock_init(&t->ftid_lock);
507 rte_atomic32_init(&t->tids_in_use);
508 rte_atomic32_set(&t->tids_in_use, 0);
509 rte_atomic32_init(&t->conns_in_use);
510 rte_atomic32_set(&t->conns_in_use, 0);
512 /* Set up the free list for atid_tab and clear the ftid bitmap. */
515 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
516 t->afree = t->atid_tab;
519 t->ftid_bmap = rte_bitmap_init(t->nftids, t->ftid_bmap_array,
529 static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
530 unsigned int us, unsigned int cnt,
531 unsigned int size, unsigned int iqe_size)
534 cxgb4_set_rspq_intr_params(q, us, cnt);
535 q->iqe_len = iqe_size;
539 int cxgbe_cfg_queue_count(struct rte_eth_dev *eth_dev)
541 struct port_info *temp_pi, *pi = eth_dev->data->dev_private;
542 struct adapter *adap = pi->adapter;
543 u16 first_txq = 0, first_rxq = 0;
544 struct sge *s = &adap->sge;
545 u16 i, max_rxqs, max_txqs;
547 max_rxqs = s->max_ethqsets;
548 max_txqs = s->max_ethqsets;
549 for_each_port(adap, i) {
550 temp_pi = adap2pinfo(adap, i);
551 if (i == pi->port_id)
554 if (max_rxqs <= temp_pi->n_rx_qsets ||
555 max_txqs <= temp_pi->n_tx_qsets)
558 first_rxq += temp_pi->n_rx_qsets;
559 first_txq += temp_pi->n_tx_qsets;
560 max_rxqs -= temp_pi->n_rx_qsets;
561 max_txqs -= temp_pi->n_tx_qsets;
564 if ((eth_dev->data->nb_rx_queues < 1) ||
565 (eth_dev->data->nb_tx_queues < 1))
568 if (eth_dev->data->nb_rx_queues > max_rxqs ||
569 eth_dev->data->nb_tx_queues > max_txqs)
572 /* We must configure RSS, since the config has changed */
573 pi->flags &= ~PORT_RSS_DONE;
575 pi->n_rx_qsets = eth_dev->data->nb_rx_queues;
576 pi->n_tx_qsets = eth_dev->data->nb_tx_queues;
577 pi->first_rxqset = first_rxq;
578 pi->first_txqset = first_txq;
583 void cxgbe_cfg_queues_free(struct adapter *adap)
585 if (adap->sge.ethtxq) {
586 rte_free(adap->sge.ethtxq);
587 adap->sge.ethtxq = NULL;
590 if (adap->sge.ethrxq) {
591 rte_free(adap->sge.ethrxq);
592 adap->sge.ethrxq = NULL;
595 adap->flags &= ~CFG_QUEUES;
598 int cxgbe_cfg_queues(struct rte_eth_dev *eth_dev)
600 struct port_info *pi = eth_dev->data->dev_private;
601 struct adapter *adap = pi->adapter;
602 struct sge *s = &adap->sge;
605 if (!(adap->flags & CFG_QUEUES)) {
606 s->ethrxq = rte_calloc_socket(NULL, s->max_ethqsets,
607 sizeof(struct sge_eth_rxq), 0,
612 s->ethtxq = rte_calloc_socket(NULL, s->max_ethqsets,
613 sizeof(struct sge_eth_txq), 0,
621 for (i = 0; i < s->max_ethqsets; i++) {
622 struct sge_eth_rxq *r = &s->ethrxq[i];
623 struct sge_eth_txq *t = &s->ethtxq[i];
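/* 5us interrupt hold-off, 32-packet threshold, 1024-entry response queue, 64B IQ entries (see init_rspq() parameters) */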
625 init_rspq(adap, &r->rspq, 5, 32, 1024, 64);
627 r->fl.size = (r->usembufs ? 1024 : 72);
632 init_rspq(adap, &adap->sge.fw_evtq, 0, 0, 1024, 64);
633 adap->flags |= CFG_QUEUES;
639 void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats)
641 t4_get_port_stats_offset(pi->adapter, pi->tx_chan, stats,
645 void cxgbe_stats_reset(struct port_info *pi)
647 t4_clr_port_stats(pi->adapter, pi->tx_chan);
650 static void setup_memwin(struct adapter *adap)
654 /* For T5, only relative offset inside the PCIe BAR is passed */
655 mem_win0_base = MEMWIN0_BASE;
658 * Set up memory window for accessing adapter memory ranges. (Read
659 * back MA register to ensure that changes propagate before we attempt
660 * to use the new values.)
663 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
665 mem_win0_base | V_BIR(0) |
666 V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT));
668 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
672 int cxgbe_init_rss(struct adapter *adap)
679 err = t4_init_rss_mode(adap, adap->mbox);
684 for_each_port(adap, i) {
685 struct port_info *pi = adap2pinfo(adap, i);
687 pi->rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
691 pi->rss_hf = CXGBE_RSS_HF_ALL;
697 * Dump basic information about the adapter.
699 void cxgbe_print_adapter_info(struct adapter *adap)
702 * Hardware/Firmware/etc. Version/Revision IDs.
704 t4_dump_version_info(adap);
707 void cxgbe_print_port_info(struct adapter *adap)
711 struct rte_pci_addr *loc = &adap->pdev->addr;
713 for_each_port(adap, i) {
714 const struct port_info *pi = adap2pinfo(adap, i);
717 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
718 bufp += sprintf(bufp, "100M/");
719 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
720 bufp += sprintf(bufp, "1G/");
721 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
722 bufp += sprintf(bufp, "10G/");
723 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
724 bufp += sprintf(bufp, "25G/");
725 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
726 bufp += sprintf(bufp, "40G/");
727 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
728 bufp += sprintf(bufp, "50G/");
729 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
730 bufp += sprintf(bufp, "100G/");
733 sprintf(bufp, "BASE-%s",
734 t4_get_port_type_description(
735 (enum fw_port_type)pi->link_cfg.port_type));
738 " " PCI_PRI_FMT " Chelsio rev %d %s %s\n",
739 loc->domain, loc->bus, loc->devid, loc->function,
740 CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
741 (adap->flags & USING_MSIX) ? " MSI-X" :
742 (adap->flags & USING_MSI) ? " MSI" : "");
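/* Parse a single devarg key/value pair: the boolean keys accept "1" to
 * enable the feature, while the PF filtermode/filtermask keys take a
 * hexadecimal bitmap of match fields.
 */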
746 static int check_devargs_handler(const char *key, const char *value, void *p)
748 if (!strncmp(key, CXGBE_DEVARG_CMN_KEEP_OVLAN, strlen(key)) ||
749 !strncmp(key, CXGBE_DEVARG_CMN_TX_MODE_LATENCY, strlen(key)) ||
750 !strncmp(key, CXGBE_DEVARG_VF_FORCE_LINK_UP, strlen(key))) {
751 if (!strncmp(value, "1", 1)) {
752 bool *dst_val = (bool *)p;
758 if (!strncmp(key, CXGBE_DEVARG_PF_FILTER_MODE, strlen(key)) ||
759 !strncmp(key, CXGBE_DEVARG_PF_FILTER_MASK, strlen(key))) {
760 u32 *dst_val = (u32 *)p;
764 arg_val = strtoul(value, &endptr, 16);
765 if (errno || endptr == value)
774 static int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key,
777 struct rte_kvargs *kvlist;
783 kvlist = rte_kvargs_parse(devargs->args, NULL);
787 if (!rte_kvargs_count(kvlist, key))
790 ret = rte_kvargs_process(kvlist, key, check_devargs_handler, p);
793 rte_kvargs_free(kvlist);
798 static void cxgbe_get_devargs_int(struct adapter *adap, bool *dst,
799 const char *key, bool default_value)
801 struct rte_pci_device *pdev = adap->pdev;
803 bool devarg_value = default_value;
805 *dst = default_value;
809 ret = cxgbe_get_devargs(pdev->device.devargs, key, &devarg_value);
816 static void cxgbe_get_devargs_u32(struct adapter *adap, u32 *dst,
817 const char *key, u32 default_value)
819 struct rte_pci_device *pdev = adap->pdev;
820 u32 devarg_value = default_value;
823 *dst = default_value;
827 ret = cxgbe_get_devargs(pdev->device.devargs, key, &devarg_value);
834 void cxgbe_process_devargs(struct adapter *adap)
836 cxgbe_get_devargs_int(adap, &adap->devargs.keep_ovlan,
837 CXGBE_DEVARG_CMN_KEEP_OVLAN, false);
838 cxgbe_get_devargs_int(adap, &adap->devargs.tx_mode_latency,
839 CXGBE_DEVARG_CMN_TX_MODE_LATENCY, false);
840 cxgbe_get_devargs_int(adap, &adap->devargs.force_link_up,
841 CXGBE_DEVARG_VF_FORCE_LINK_UP, false);
842 cxgbe_get_devargs_u32(adap, &adap->devargs.filtermode,
843 CXGBE_DEVARG_PF_FILTER_MODE, 0);
844 cxgbe_get_devargs_u32(adap, &adap->devargs.filtermask,
845 CXGBE_DEVARG_PF_FILTER_MASK, 0);
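/* Illustrative devargs usage (assuming the CXGBE_DEVARG_* macros above
 * expand to the key names shown; the hex values are examples only):
 *   keep_ovlan=1,filtermode=0x88,filtermask=0x80
 */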
848 static void configure_vlan_types(struct adapter *adapter)
852 for_each_port(adapter, i) {
853 /* OVLAN Type 0x88a8 */
854 t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN0),
855 V_OVLAN_MASK(M_OVLAN_MASK) |
856 V_OVLAN_ETYPE(M_OVLAN_ETYPE),
857 V_OVLAN_MASK(M_OVLAN_MASK) |
858 V_OVLAN_ETYPE(0x88a8));
859 /* OVLAN Type 0x9100 */
860 t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN1),
861 V_OVLAN_MASK(M_OVLAN_MASK) |
862 V_OVLAN_ETYPE(M_OVLAN_ETYPE),
863 V_OVLAN_MASK(M_OVLAN_MASK) |
864 V_OVLAN_ETYPE(0x9100));
867 t4_set_reg_field(adapter, MPS_PORT_RX_IVLAN(i),
868 V_IVLAN_ETYPE(M_IVLAN_ETYPE),
869 V_IVLAN_ETYPE(0x8100));
871 t4_set_reg_field(adapter, MPS_PORT_RX_CTL(i),
872 F_OVLAN_EN0 | F_OVLAN_EN1 |
874 F_OVLAN_EN0 | F_OVLAN_EN1 |
878 t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG, V_RM_OVLAN(1),
879 V_RM_OVLAN(!adapter->devargs.keep_ovlan));
882 static int cxgbe_get_filter_vnic_mode_from_devargs(u32 val)
886 vnic_mode = val & (CXGBE_DEVARGS_FILTER_MODE_PF_VF |
887 CXGBE_DEVARGS_FILTER_MODE_VLAN_OUTER);
890 case CXGBE_DEVARGS_FILTER_MODE_VLAN_OUTER:
891 return CXGBE_FILTER_VNIC_MODE_OVLAN;
892 case CXGBE_DEVARGS_FILTER_MODE_PF_VF:
893 return CXGBE_FILTER_VNIC_MODE_PFVF;
899 return CXGBE_FILTER_VNIC_MODE_NONE;
902 static int cxgbe_get_filter_mode_from_devargs(u32 val, bool closest_match)
904 int vnic_mode, fmode = 0;
908 if (val >= CXGBE_DEVARGS_FILTER_MODE_MAX) {
909 pr_err("Unsupported flags set in filter mode. Must be < 0x%x\n",
910 CXGBE_DEVARGS_FILTER_MODE_MAX);
914 vnic_mode = cxgbe_get_filter_vnic_mode_from_devargs(val);
916 pr_err("Unsupported Vnic-mode, more than 1 Vnic-mode selected\n");
922 if (val & CXGBE_DEVARGS_FILTER_MODE_PHYSICAL_PORT)
924 if (val & CXGBE_DEVARGS_FILTER_MODE_ETHERNET_DSTMAC)
926 if (val & CXGBE_DEVARGS_FILTER_MODE_ETHERNET_ETHTYPE)
927 fmode |= F_ETHERTYPE;
928 if (val & CXGBE_DEVARGS_FILTER_MODE_VLAN_INNER)
930 if (val & CXGBE_DEVARGS_FILTER_MODE_IP_TOS)
932 if (val & CXGBE_DEVARGS_FILTER_MODE_IP_PROTOCOL)
935 for (i = 0; i < ARRAY_SIZE(cxgbe_filter_mode_features); i++) {
936 if ((cxgbe_filter_mode_features[i] & fmode) == fmode) {
945 return closest_match ? cxgbe_filter_mode_features[i] : fmode;
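/* Program the requested filter tuple mode and mask into the firmware.
 * The filtermode and filtermask devargs must be supplied together, the
 * mask must be a subset of the mode, and any VNIC mode is passed as a
 * second parameter when present.
 */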
948 static int configure_filter_mode_mask(struct adapter *adap)
950 u32 params[2], val[2], nparams = 0;
953 if (!adap->devargs.filtermode && !adap->devargs.filtermask)
956 if (!adap->devargs.filtermode || !adap->devargs.filtermask) {
957 pr_err("Unsupported, Provide both filtermode and filtermask devargs\n");
961 if (adap->devargs.filtermask & ~adap->devargs.filtermode) {
962 pr_err("Unsupported, filtermask (0x%x) must be subset of filtermode (0x%x)\n",
963 adap->devargs.filtermask, adap->devargs.filtermode);
968 params[0] = CXGBE_FW_PARAM_DEV(FILTER) |
969 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK);
971 ret = cxgbe_get_filter_mode_from_devargs(adap->devargs.filtermode,
974 pr_err("Unsupported filtermode devargs combination:0x%x\n",
975 adap->devargs.filtermode);
979 val[0] = V_FW_PARAMS_PARAM_FILTER_MODE(ret);
981 ret = cxgbe_get_filter_mode_from_devargs(adap->devargs.filtermask,
984 pr_err("Unsupported filtermask devargs combination:0x%x\n",
985 adap->devargs.filtermask);
989 val[0] |= V_FW_PARAMS_PARAM_FILTER_MASK(ret);
993 ret = cxgbe_get_filter_vnic_mode_from_devargs(adap->devargs.filtermode);
998 params[1] = CXGBE_FW_PARAM_DEV(FILTER) |
999 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_VNIC_MODE);
1006 return t4_set_params(adap, adap->mbox, adap->pf, 0, nparams,
1010 static void configure_pcie_ext_tag(struct adapter *adapter)
1013 int pos = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
1019 t4_os_pci_read_cfg2(adapter, pos + PCI_EXP_DEVCTL, &v);
1020 v |= PCI_EXP_DEVCTL_EXT_TAG;
1021 t4_os_pci_write_cfg2(adapter, pos + PCI_EXP_DEVCTL, v);
1022 if (is_t6(adapter->params.chip)) {
1023 t4_set_reg_field(adapter, A_PCIE_CFG2,
1024 V_T6_TOTMAXTAG(M_T6_TOTMAXTAG),
1026 t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
1027 V_T6_MINTAG(M_T6_MINTAG),
1030 t4_set_reg_field(adapter, A_PCIE_CFG2,
1031 V_TOTMAXTAG(M_TOTMAXTAG),
1033 t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
1040 /* Figure out how many Queue Sets we can support */
1041 void cxgbe_configure_max_ethqsets(struct adapter *adapter)
1043 unsigned int ethqsets, reserved;
1045 /* We need to reserve an Ingress Queue for the Asynchronous Firmware
1046 * Event Queue and 1 Control Queue per port.
1048 * For each Queue Set, we'll need the ability to allocate two Egress
1049 * Contexts -- one for the Ingress Queue Free List and one for the TX
1052 reserved = max(adapter->params.nports, 1);
1053 if (is_pf4(adapter)) {
1054 struct pf_resources *pfres = &adapter->params.pfres;
1056 ethqsets = min(pfres->niqflint, pfres->nethctrl);
1057 if (ethqsets > (pfres->neq / 2))
1058 ethqsets = pfres->neq / 2;
1060 struct vf_resources *vfres = &adapter->params.vfres;
1062 ethqsets = min(vfres->niqflint, vfres->nethctrl);
1063 if (ethqsets > (vfres->neq / 2))
1064 ethqsets = vfres->neq / 2;
1067 ethqsets -= reserved;
1068 adapter->sge.max_ethqsets = ethqsets;
1072 * Tweak configuration based on system architecture, etc. Most of these have
1073 * defaults assigned to them by Firmware Configuration Files (if we're using
1074 * them) but need to be explicitly set if we're using hard-coded
1075 * initialization. So these are essentially common tweaks/settings for
1076 * Configuration Files and hard-coded initialization ...
1078 static int adap_init0_tweaks(struct adapter *adapter)
1083 * Fix up various Host-Dependent Parameters like Page Size, Cache
1084 * Line Size, etc. The firmware default is for a 4KB Page Size and
1085 * 64B Cache Line Size ...
1087 t4_fixup_host_params_compat(adapter, CXGBE_PAGE_SIZE, L1_CACHE_BYTES,
1091 * Keep the chip default offset to deliver Ingress packets into our
1092 * DMA buffers to zero
1095 t4_set_reg_field(adapter, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT),
1096 V_PKTSHIFT(rx_dma_offset));
1098 t4_set_reg_field(adapter, A_SGE_FLM_CFG,
1099 V_CREDITCNT(M_CREDITCNT) | M_CREDITCNTPACKING,
1100 V_CREDITCNT(3) | V_CREDITCNTPACKING(1));
1102 t4_set_reg_field(adapter, A_SGE_INGRESS_RX_THRESHOLD,
1103 V_THRESHOLD_3(M_THRESHOLD_3), V_THRESHOLD_3(32U));
1105 t4_set_reg_field(adapter, A_SGE_CONTROL2, V_IDMAARBROUNDROBIN(1U),
1106 V_IDMAARBROUNDROBIN(1U));
1109 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
1110 * adds the pseudo header itself.
1112 t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG,
1113 F_CSUM_HAS_PSEUDO_HDR, 0);
1119 * Attempt to initialize the adapter via a Firmware Configuration File.
1121 static int adap_init0_config(struct adapter *adapter, int reset)
1123 u32 finiver, finicsum, cfcsum, param, val;
1124 struct fw_caps_config_cmd caps_cmd;
1125 unsigned long mtype = 0, maddr = 0;
1126 u8 config_issued = 0;
1127 char config_name[20];
1131 * Reset device if necessary.
1134 ret = t4_fw_reset(adapter, adapter->mbox,
1135 F_PIORSTMODE | F_PIORST);
1137 dev_warn(adapter, "Firmware reset failed, error %d\n",
1143 cfg_addr = t4_flash_cfg_addr(adapter);
1146 dev_warn(adapter, "Finding address for firmware config file in flash failed, error %d\n",
1151 strcpy(config_name, "On Flash");
1152 mtype = FW_MEMTYPE_CF_FLASH;
1155 /* Enable HASH filter region when support is available. */
1157 param = CXGBE_FW_PARAM_DEV(HASHFILTER_WITH_OFLD);
1158 t4_set_params(adapter, adapter->mbox, adapter->pf, 0, 1,
1162 * Issue a Capability Configuration command to the firmware to get it
1163 * to parse the Configuration File. We don't use t4_fw_config_file()
1164 * because we want the ability to modify various features after we've
1165 * processed the configuration file ...
1167 memset(&caps_cmd, 0, sizeof(caps_cmd));
1168 caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1169 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1170 caps_cmd.cfvalid_to_len16 =
1171 cpu_to_be32(F_FW_CAPS_CONFIG_CMD_CFVALID |
1172 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
1173 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
1174 FW_LEN16(caps_cmd));
1175 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
1178 * If the CAPS_CONFIG failed with an ENOENT (for a Firmware
1179 * Configuration File in FLASH), our last gasp effort is to use the
1180 * Firmware Configuration File which is embedded in the firmware. A
1181 * very few early versions of the firmware didn't have one embedded
1182 * but we can ignore those.
1184 if (ret == -ENOENT) {
1185 dev_info(adapter, "%s: Going for embedded config in firmware..\n",
1188 memset(&caps_cmd, 0, sizeof(caps_cmd));
1189 caps_cmd.op_to_write =
1190 cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1191 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1192 caps_cmd.cfvalid_to_len16 = cpu_to_be32(FW_LEN16(caps_cmd));
1193 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
1194 sizeof(caps_cmd), &caps_cmd);
1195 strcpy(config_name, "Firmware Default");
1202 finiver = be32_to_cpu(caps_cmd.finiver);
1203 finicsum = be32_to_cpu(caps_cmd.finicsum);
1204 cfcsum = be32_to_cpu(caps_cmd.cfcsum);
1205 if (finicsum != cfcsum)
1206 dev_warn(adapter, "Configuration File checksum mismatch: [fini] csum=%#x, computed csum=%#x\n",
1210 * If we're a pure NIC driver then disable all offloading facilities.
1211 * This will allow the firmware to optimize aspects of the hardware
1212 * configuration which will result in improved performance.
1214 caps_cmd.niccaps &= cpu_to_be16(~FW_CAPS_CONFIG_NIC_ETHOFLD);
1215 caps_cmd.toecaps = 0;
1216 caps_cmd.iscsicaps = 0;
1217 caps_cmd.rdmacaps = 0;
1218 caps_cmd.fcoecaps = 0;
1219 caps_cmd.cryptocaps = 0;
1222 * And now tell the firmware to use the configuration we just loaded.
1224 caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1225 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
1226 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
1227 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
1230 dev_warn(adapter, "Unable to finalize Firmware Capabilities %d\n",
1236 * Tweak configuration based on system architecture, etc.
1238 ret = adap_init0_tweaks(adapter);
1240 dev_warn(adapter, "Unable to do init0-tweaks %d\n", -ret);
1245 * And finally tell the firmware to initialize itself using the
1246 * parameters from the Configuration File.
1248 ret = t4_fw_initialize(adapter, adapter->mbox);
1250 dev_warn(adapter, "Initializing Firmware failed, error %d\n",
1256 * Return successfully and note that we're operating with parameters
1257 * not supplied by the driver, rather than from hard-wired
1258 * initialization constants buried in the driver.
1261 "Successfully configured using Firmware Configuration File \"%s\", version %#x, computed checksum %#x\n",
1262 config_name, finiver, cfcsum);
1267 * Something bad happened. Return the error ... (If the "error"
1268 * is that there's no Configuration File on the adapter we don't
1269 * want to issue a warning since this is fairly common.)
1272 if (config_issued && ret != -ENOENT)
1273 dev_warn(adapter, "\"%s\" configuration file error %d\n",
1276 dev_debug(adapter, "%s: returning ret = %d ..\n", __func__, ret);
1280 static int adap_init0(struct adapter *adap)
1282 struct fw_caps_config_cmd caps_cmd;
1285 enum dev_state state;
1286 u32 params[7], val[7];
1288 int mbox = adap->mbox;
1291 * Contact FW, advertising Master capability.
1293 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
1295 dev_err(adap, "%s: could not connect to FW, error %d\n",
1300 CXGBE_DEBUG_MBOX(adap, "%s: adap->mbox = %d; ret = %d\n", __func__,
1304 adap->flags |= MASTER_PF;
1306 if (state == DEV_STATE_INIT) {
1308 * Force halt and reset FW because a previous instance may have
1309 * exited abnormally without properly shutting down
1311 ret = t4_fw_halt(adap, adap->mbox, reset);
1313 dev_err(adap, "Failed to halt. Exit.\n");
1317 ret = t4_fw_restart(adap, adap->mbox, reset);
1319 dev_err(adap, "Failed to restart. Exit.\n");
1322 state = (enum dev_state)((unsigned)state & ~DEV_STATE_INIT);
1325 t4_get_version_info(adap);
1327 ret = t4_get_core_clock(adap, &adap->params.vpd);
1329 dev_err(adap, "%s: could not get core clock, error %d\n",
1335 * If the firmware is initialized already (and we're not forcing a
1336 * master initialization), note that we're living with existing
1337 * adapter parameters. Otherwise, it's time to try initializing the
1340 if (state == DEV_STATE_INIT) {
1341 dev_info(adap, "Coming up as %s: Adapter already initialized\n",
1342 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
1344 dev_info(adap, "Coming up as MASTER: Initializing adapter\n");
1346 ret = adap_init0_config(adap, reset);
1347 if (ret == -ENOENT) {
1349 "No Configuration File present on adapter. Using hard-wired configuration parameters.\n");
1354 dev_err(adap, "could not initialize adapter, error %d\n", -ret);
1358 /* Now that we've successfully configured and initialized the adapter
1359 * (or found it already initialized), we can ask the Firmware what
1360 * resources it has provisioned for us.
1362 ret = t4_get_pfres(adap);
1364 dev_err(adap->pdev_dev,
1365 "Unable to retrieve resource provisioning info\n");
1369 /* Find out what ports are available to us. */
1370 v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
1371 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
1372 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
1374 dev_err(adap, "%s: failure in t4_query_params; error = %d\n",
1379 adap->params.nports = hweight32(port_vec);
1380 adap->params.portvec = port_vec;
1382 dev_debug(adap, "%s: adap->params.nports = %u\n", __func__,
1383 adap->params.nports);
1386 * Give the SGE code a chance to pull in anything that it needs ...
1387 * Note that this must be called after we retrieve our VPD parameters
1388 * in order to know how to convert core ticks to seconds, etc.
1390 ret = t4_sge_init(adap);
1392 dev_err(adap, "t4_sge_init failed with error %d\n",
1398 * Grab some of our basic fundamental operating parameters.
1400 params[0] = CXGBE_FW_PARAM_PFVF(L2T_START);
1401 params[1] = CXGBE_FW_PARAM_PFVF(L2T_END);
1402 params[2] = CXGBE_FW_PARAM_PFVF(FILTER_START);
1403 params[3] = CXGBE_FW_PARAM_PFVF(FILTER_END);
1404 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 4, params, val);
1407 adap->l2t_start = val[0];
1408 adap->l2t_end = val[1];
1409 adap->tids.ftid_base = val[2];
1410 adap->tids.nftids = val[3] - val[2] + 1;
1412 params[0] = CXGBE_FW_PARAM_PFVF(CLIP_START);
1413 params[1] = CXGBE_FW_PARAM_PFVF(CLIP_END);
1414 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
1417 adap->clipt_start = val[0];
1418 adap->clipt_end = val[1];
1421 * Get device capabilities so we can determine what resources we need
1424 memset(&caps_cmd, 0, sizeof(caps_cmd));
1425 caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1426 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1427 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
1428 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
1433 if ((caps_cmd.niccaps & cpu_to_be16(FW_CAPS_CONFIG_NIC_HASHFILTER)) &&
1434 is_t6(adap->params.chip)) {
1435 if (cxgbe_init_hash_filter(adap) < 0)
1439 /* See if FW supports FW_FILTER2 work request */
1440 if (is_t4(adap->params.chip)) {
1441 adap->params.filter2_wr_support = 0;
1443 params[0] = CXGBE_FW_PARAM_DEV(FILTER2_WR);
1444 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
1446 adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
1449 /* Check if FW supports returning vin.
1450 * If this is not supported, driver will interpret
1451 * these values from viid.
1453 params[0] = CXGBE_FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
1454 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
1456 adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0);
1458 /* query tid-related parameters */
1459 params[0] = CXGBE_FW_PARAM_DEV(NTID);
1460 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
1464 adap->tids.ntids = val[0];
1465 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
1467 /* If we're running on newer firmware, let it know that we're
1468 * prepared to deal with encapsulated CPL messages. Older
1469 * firmware won't understand this and we'll just get
1470 * unencapsulated messages ...
1472 params[0] = CXGBE_FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
1474 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
1477 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
1478 * capability. Earlier versions of the firmware didn't have the
1479 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
1480 * permission to use ULPTX MEMWRITE DSGL.
1482 if (is_t4(adap->params.chip)) {
1483 adap->params.ulptx_memwrite_dsgl = false;
1485 params[0] = CXGBE_FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
1486 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
1488 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
1491 /* Query for max number of packets that can be coalesced for Tx */
1492 params[0] = CXGBE_FW_PARAM_PFVF(MAX_PKTS_PER_ETH_TX_PKTS_WR);
1493 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
1494 if (!ret && val[0] > 0)
1495 adap->params.max_tx_coalesce_num = val[0];
1497 adap->params.max_tx_coalesce_num = ETH_COALESCE_PKT_NUM;
1499 params[0] = CXGBE_FW_PARAM_DEV(VI_ENABLE_INGRESS_AFTER_LINKUP);
1500 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
1501 adap->params.vi_enable_rx = (ret == 0 && val[0] != 0);
1504 * The MTU/MSS Table is initialized by now, so load their values. If
1505 * we're initializing the adapter, then we'll make any modifications
1506 * we want to the MTU/MSS Table and also initialize the congestion
1509 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
1510 if (state != DEV_STATE_INIT) {
1514 * The default MTU Table contains values 1492 and 1500.
1515 * However, for TCP, it's better to have two values which are
1516 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
1517 * This allows us to have a TCP Data Payload which is a
1518 * multiple of 8 regardless of what combination of TCP Options
1519 * are in use (always a multiple of 4 bytes) which is
1520 * important for performance reasons. For instance, if no
1521 * options are in use, then we have a 20-byte IP header and a
1522 * 20-byte TCP header. In this case, a 1500-byte MTU would
1523 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
1524 * which is not a multiple of 8. So using an MTU of 1488 in
1525 * this case results in a TCP Data Payload of 1448 bytes which
1526 * is a multiple of 8. On the other hand, if 12-byte TCP Time
1527 * Stamps have been negotiated, then an MTU of 1500 bytes
1528 * results in a TCP Data Payload of 1448 bytes which, as
1529 * above, is a multiple of 8 bytes ...
1531 for (i = 0; i < NMTUS; i++)
1532 if (adap->params.mtus[i] == 1492) {
1533 adap->params.mtus[i] = 1488;
1537 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
1538 adap->params.b_wnd);
1540 t4_init_sge_params(adap);
1541 ret = configure_filter_mode_mask(adap);
1544 t4_init_tp_params(adap);
1545 configure_pcie_ext_tag(adap);
1546 configure_vlan_types(adap);
1547 cxgbe_configure_max_ethqsets(adap);
1549 adap->params.drv_memwin = MEMWIN_NIC;
1550 adap->flags |= FW_OK;
1551 dev_debug(adap, "%s: returning zero..\n", __func__);
1555 * Something bad happened. If a command timed out or failed with EIO,
1556 * the FW is not operating within its spec or something catastrophic
1557 * happened to the HW/FW, so stop issuing commands.
1560 if (ret != -ETIMEDOUT && ret != -EIO)
1561 t4_fw_bye(adap, adap->mbox);
1566 * t4_os_portmod_changed - handle port module changes
1567 * @adap: the adapter associated with the module change
1568 * @port_id: the port index whose module status has changed
1570 * This is the OS-dependent handler for port module changes. It is
1571 * invoked when a port module is removed or inserted for any OS-specific
1574 void t4_os_portmod_changed(const struct adapter *adap, int port_id)
1576 static const char * const mod_str[] = {
1577 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
1580 const struct port_info *pi = adap2pinfo(adap, port_id);
1582 if (pi->link_cfg.mod_type == FW_PORT_MOD_TYPE_NONE)
1583 dev_info(adap, "Port%d: port module unplugged\n", pi->port_id);
1584 else if (pi->link_cfg.mod_type < ARRAY_SIZE(mod_str))
1585 dev_info(adap, "Port%d: %s port module inserted\n", pi->port_id,
1586 mod_str[pi->link_cfg.mod_type]);
1587 else if (pi->link_cfg.mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
1588 dev_info(adap, "Port%d: unsupported port module inserted\n",
1590 else if (pi->link_cfg.mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
1591 dev_info(adap, "Port%d: unknown port module inserted\n",
1593 else if (pi->link_cfg.mod_type == FW_PORT_MOD_TYPE_ERROR)
1594 dev_info(adap, "Port%d: transceiver module error\n",
1597 dev_info(adap, "Port%d: unknown module type %d inserted\n",
1598 pi->port_id, pi->link_cfg.mod_type);
1601 void t4_os_link_changed(struct adapter *adap, int port_id)
1603 struct port_info *pi = adap2pinfo(adap, port_id);
1605 /* If link status has not changed or if firmware doesn't
1606 * support enabling/disabling VI's Rx path during runtime,
1609 if (adap->params.vi_enable_rx == 0 ||
1610 pi->vi_en_rx == pi->link_cfg.link_ok)
1613 /* Don't enable VI Rx path, if link has been administratively
1616 if (pi->vi_en_tx == 0 && pi->vi_en_rx == 0)
1619 /* When link goes down, disable the port's Rx path to drop
1620 * Rx traffic closer to the wire, instead of processing it
1621 * further in the Rx pipeline. The Rx path will be re-enabled
1622 * once the link-up message arrives on the firmware event queue.
1624 pi->vi_en_rx = pi->link_cfg.link_ok;
1625 t4_enable_vi(adap, adap->mbox, pi->viid, pi->vi_en_rx, pi->vi_en_tx);
1628 bool cxgbe_force_linkup(struct adapter *adap)
1631 return false; /* force_linkup not required for pf driver */
1633 return adap->devargs.force_link_up;
1637 * link_start - enable a port
1638 * @dev: the port to enable
1640 * Performs the MAC and PHY actions needed to enable a port.
1642 int cxgbe_link_start(struct port_info *pi)
1644 struct adapter *adapter = pi->adapter;
1649 mtu = pi->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
1650 (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
1652 conf_offloads = pi->eth_dev->data->dev_conf.rxmode.offloads;
1655 * We do not set address filters and promiscuity here, the stack does
1656 * that step explicitly.
1658 ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1, -1,
1659 !!(conf_offloads & DEV_RX_OFFLOAD_VLAN_STRIP),
1662 ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt,
1663 (u8 *)&pi->eth_dev->data->mac_addrs[0]);
1665 pi->xact_addr_filt = ret;
1669 if (ret == 0 && is_pf4(adapter))
1670 ret = t4_link_l1cfg(pi, pi->link_cfg.admin_caps);
1672 /* Disable VI Rx until link up message is received in
1673 * firmware event queue, if firmware supports enabling/
1674 * disabling VI Rx at runtime.
1676 pi->vi_en_rx = adapter->params.vi_enable_rx ? 0 : 1;
1678 ret = t4_enable_vi_params(adapter, adapter->mbox, pi->viid,
1679 pi->vi_en_rx, pi->vi_en_tx, false);
1682 if (ret == 0 && cxgbe_force_linkup(adapter))
1683 pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
1688 * cxgbe_write_rss_conf - write the RSS hash configuration for a given port
1690 * @rss_hf: Hash configuration to apply
1692 int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
1694 struct adapter *adapter = pi->adapter;
1695 const struct sge_eth_rxq *rxq;
1700 /* Should never be called before setting up sge eth rx queues */
1701 if (!(adapter->flags & FULL_INIT_DONE)) {
1702 dev_err(adapter, "%s: No RXQs available on port %d\n",
1703 __func__, pi->port_id);
1707 /* Don't allow unsupported hash functions */
1708 if (rss_hf & ~CXGBE_RSS_HF_ALL)
1711 if (rss_hf & CXGBE_RSS_HF_IPV4_MASK)
1712 flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
1714 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1715 flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
1717 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1718 flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
1719 F_FW_RSS_VI_CONFIG_CMD_UDPEN;
1721 if (rss_hf & CXGBE_RSS_HF_IPV6_MASK)
1722 flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
1724 if (rss_hf & CXGBE_RSS_HF_TCP_IPV6_MASK)
1725 flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
1726 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
1728 if (rss_hf & CXGBE_RSS_HF_UDP_IPV6_MASK)
1729 flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
1730 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
1731 F_FW_RSS_VI_CONFIG_CMD_UDPEN;
1733 rxq = &adapter->sge.ethrxq[pi->first_rxqset];
1734 rss = rxq[0].rspq.abs_id;
1736 /* If Tunnel All Lookup isn't specified in the global RSS
1737 * Configuration, then we need to specify a default Ingress
1738 * Queue for any ingress packets which aren't hashed. We'll
1739 * use our first ingress queue ...
1741 err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
1747 * cxgbe_write_rss - write the RSS table for a given port
1749 * @queues: array of queue indices for RSS
1751 * Sets up the portion of the HW RSS table for the port's VI to distribute
1752 * packets to the Rx queues in @queues.
1754 int cxgbe_write_rss(const struct port_info *pi, const u16 *queues)
1758 struct adapter *adapter = pi->adapter;
1759 const struct sge_eth_rxq *rxq;
1761 /* Should never be called before setting up sge eth rx queues */
1762 BUG_ON(!(adapter->flags & FULL_INIT_DONE));
1764 rxq = &adapter->sge.ethrxq[pi->first_rxqset];
1765 rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
1769 /* map the queue indices to queue ids */
1770 for (i = 0; i < pi->rss_size; i++, queues++)
1771 rss[i] = rxq[*queues].rspq.abs_id;
1773 err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
1774 pi->rss_size, rss, pi->rss_size);
1780 * setup_rss - configure RSS
1781 * @adapter: the adapter
1783 * Sets up RSS to distribute packets to multiple receive queues. We
1784 * configure the RSS CPU lookup table to distribute to the number of HW
1785 * receive queues, and the response queue lookup table to narrow that
1786 * down to the response queues actually configured for each port.
1787 * We always configure the RSS mapping for all ports since the mapping
1788 * table has plenty of entries.
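*
* For example (illustrative): with rss_size 128 and 4 Rx queue sets, the
* default table built below maps entries 0,1,2,3,0,1,2,3,... so hashed
* traffic is spread evenly across the four queues.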
1790 int cxgbe_setup_rss(struct port_info *pi)
1793 struct adapter *adapter = pi->adapter;
1795 dev_debug(adapter, "%s: pi->rss_size = %u; pi->n_rx_qsets = %u\n",
1796 __func__, pi->rss_size, pi->n_rx_qsets);
1798 if (!(pi->flags & PORT_RSS_DONE)) {
1799 if (adapter->flags & FULL_INIT_DONE) {
1800 /* Fill default values with equal distribution */
1801 for (j = 0; j < pi->rss_size; j++)
1802 pi->rss[j] = j % pi->n_rx_qsets;
1804 err = cxgbe_write_rss(pi, pi->rss);
1808 err = cxgbe_write_rss_conf(pi, pi->rss_hf);
1811 pi->flags |= PORT_RSS_DONE;
1818 * Enable NAPI scheduling and interrupt generation for all Rx queues.
1820 static void enable_rx(struct adapter *adap, struct sge_rspq *q)
1822 /* 0-increment GTS to start the timer and enable interrupts */
1823 t4_write_reg(adap, is_pf4(adap) ? MYPF_REG(A_SGE_PF_GTS) :
1824 T4VF_SGE_BASE_ADDR + A_SGE_VF_GTS,
1825 V_SEINTARM(q->intr_params) |
1826 V_INGRESSQID(q->cntxt_id));
1829 void cxgbe_enable_rx_queues(struct port_info *pi)
1831 struct adapter *adap = pi->adapter;
1832 struct sge *s = &adap->sge;
1835 for (i = 0; i < pi->n_rx_qsets; i++)
1836 enable_rx(adap, &s->ethrxq[pi->first_rxqset + i].rspq);
1840 * fw_caps_to_speed_caps - translate Firmware Port Caps to Speed Caps.
1841 * @port_type: Firmware Port Type
1842 * @fw_caps: Firmware Port Capabilities
1843 * @speed_caps: Device Info Speed Capabilities
1845 * Translate a Firmware Port Capabilities specification to Device Info
1846 * Speed Capabilities.
1848 static void fw_caps_to_speed_caps(enum fw_port_type port_type,
1849 unsigned int fw_caps,
1852 #define SET_SPEED(__speed_name) \
1854 *speed_caps |= ETH_LINK_ ## __speed_name; \
1857 #define FW_CAPS_TO_SPEED(__fw_name) \
1859 if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
1860 SET_SPEED(__fw_name); \
1863 switch (port_type) {
1864 case FW_PORT_TYPE_BT_SGMII:
1865 case FW_PORT_TYPE_BT_XFI:
1866 case FW_PORT_TYPE_BT_XAUI:
1867 FW_CAPS_TO_SPEED(SPEED_100M);
1868 FW_CAPS_TO_SPEED(SPEED_1G);
1869 FW_CAPS_TO_SPEED(SPEED_10G);
1872 case FW_PORT_TYPE_KX4:
1873 case FW_PORT_TYPE_KX:
1874 case FW_PORT_TYPE_FIBER_XFI:
1875 case FW_PORT_TYPE_FIBER_XAUI:
1876 case FW_PORT_TYPE_SFP:
1877 case FW_PORT_TYPE_QSFP_10G:
1878 case FW_PORT_TYPE_QSA:
1879 FW_CAPS_TO_SPEED(SPEED_1G);
1880 FW_CAPS_TO_SPEED(SPEED_10G);
1883 case FW_PORT_TYPE_KR:
1884 SET_SPEED(SPEED_10G);
1887 case FW_PORT_TYPE_BP_AP:
1888 case FW_PORT_TYPE_BP4_AP:
1889 SET_SPEED(SPEED_1G);
1890 SET_SPEED(SPEED_10G);
1893 case FW_PORT_TYPE_BP40_BA:
1894 case FW_PORT_TYPE_QSFP:
1895 SET_SPEED(SPEED_40G);
1898 case FW_PORT_TYPE_CR_QSFP:
1899 case FW_PORT_TYPE_SFP28:
1900 case FW_PORT_TYPE_KR_SFP28:
1901 FW_CAPS_TO_SPEED(SPEED_1G);
1902 FW_CAPS_TO_SPEED(SPEED_10G);
1903 FW_CAPS_TO_SPEED(SPEED_25G);
1906 case FW_PORT_TYPE_CR2_QSFP:
1907 SET_SPEED(SPEED_50G);
1910 case FW_PORT_TYPE_KR4_100G:
1911 case FW_PORT_TYPE_CR4_QSFP:
1912 FW_CAPS_TO_SPEED(SPEED_25G);
1913 FW_CAPS_TO_SPEED(SPEED_40G);
1914 FW_CAPS_TO_SPEED(SPEED_50G);
1915 FW_CAPS_TO_SPEED(SPEED_100G);
1922 #undef FW_CAPS_TO_SPEED
1927 * cxgbe_get_speed_caps - Fetch supported speed capabilities
1928 * @pi: Underlying port's info
1929 * @speed_caps: Device Info speed capabilities
1931 * Fetch supported speed capabilities of the underlying port.
1933 void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
1937 fw_caps_to_speed_caps(pi->link_cfg.port_type, pi->link_cfg.pcaps,
1940 if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG))
1941 *speed_caps |= ETH_LINK_SPEED_FIXED;
1945 * cxgbe_set_link_status - Set device link up or down.
1946 * @pi: Underlying port's info
1947 * @status: 0 - down, 1 - up
1949 * Set the device link up or down.
1951 int cxgbe_set_link_status(struct port_info *pi, bool status)
1953 struct adapter *adapter = pi->adapter;
1956 /* Wait for link up message from firmware to enable Rx path,
1957 * if firmware supports enabling/disabling VI Rx at runtime.
1959 pi->vi_en_rx = adapter->params.vi_enable_rx ? 0 : status;
1960 pi->vi_en_tx = status;
1961 err = t4_enable_vi(adapter, adapter->mbox, pi->viid, pi->vi_en_rx,
1964 dev_err(adapter, "%s: disable_vi failed: %d\n", __func__, err);
1969 t4_reset_link_config(adapter, pi->pidx);
1975 * cxgb_up - enable the adapter
1976 * @adap: adapter being enabled
1978 * Called when the first port is enabled, this function performs the
1979 * actions necessary to make an adapter operational, such as completing
1980 * the initialization of HW modules, and enabling interrupts.
1982 int cxgbe_up(struct adapter *adap)
1984 enable_rx(adap, &adap->sge.fw_evtq);
1985 t4_sge_tx_monitor_start(adap);
1987 t4_intr_enable(adap);
1988 adap->flags |= FULL_INIT_DONE;
1990 /* TODO: deadman watchdog ?? */
1997 int cxgbe_down(struct port_info *pi)
1999 return cxgbe_set_link_status(pi, false);
2003 * Release resources when all the ports have been stopped.
2005 void cxgbe_close(struct adapter *adapter)
2007 if (adapter->flags & FULL_INIT_DONE) {
2008 tid_free(&adapter->tids);
2009 t4_cleanup_mpstcam(adapter);
2010 t4_cleanup_clip_tbl(adapter);
2011 t4_cleanup_l2t(adapter);
2012 t4_cleanup_smt(adapter);
2013 if (is_pf4(adapter))
2014 t4_intr_disable(adapter);
2015 t4_sge_tx_monitor_stop(adapter);
2016 t4_free_sge_resources(adapter);
2017 adapter->flags &= ~FULL_INIT_DONE;
2020 cxgbe_cfg_queues_free(adapter);
2022 if (is_pf4(adapter) && (adapter->flags & FW_OK))
2023 t4_fw_bye(adapter, adapter->mbox);
2026 static void adap_smt_index(struct adapter *adapter, u32 *smt_start_idx,
2029 u32 params[2], smt_val[2];
2032 params[0] = CXGBE_FW_PARAM_PFVF(GET_SMT_START);
2033 params[1] = CXGBE_FW_PARAM_PFVF(GET_SMT_SIZE);
2035 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
2036 2, params, smt_val);
2038 /* If FW doesn't recognize this command, then fall back to the default
2039 * setting: start index 0 and size 256.
2043 *smt_size = SMT_SIZE;
2045 *smt_start_idx = smt_val[0];
2046 /* SMT size can be zero if nsmt is not yet configured in
2047 * the config file or is set to zero; in that case, assign all
2048 * the remaining entries to this PF itself.
2051 *smt_size = SMT_SIZE - *smt_start_idx;
2053 *smt_size = smt_val[1];
2057 int cxgbe_probe(struct adapter *adapter)
2059 u32 smt_start_idx, smt_size;
2060 struct port_info *pi;
2066 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
2067 chip = t4_get_chip_type(adapter,
2068 CHELSIO_PCI_ID_VER(adapter->pdev->id.device_id));
2072 func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
2073 G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
2075 adapter->mbox = func;
2078 t4_os_lock_init(&adapter->mbox_lock);
2079 TAILQ_INIT(&adapter->mbox_list);
2080 t4_os_lock_init(&adapter->win0_lock);
2082 err = t4_prep_adapter(adapter);
2086 setup_memwin(adapter);
2087 err = adap_init0(adapter);
2089 dev_err(adapter, "%s: Adapter initialization failed, error %d\n",
2094 if (!is_t4(adapter->params.chip)) {
2096 * The userspace doorbell BAR is split evenly into doorbell
2097 * regions, each associated with an egress queue. If this
2098 * per-queue region is large enough (at least UDBS_SEG_SIZE)
2099 * then it can be used to submit a tx work request with an
2100 * implied doorbell. Enable write combining on the BAR if
2101 * there is room for such work requests.
2103 int s_qpp, qpp, num_seg;
2105 s_qpp = (S_QUEUESPERPAGEPF0 +
2106 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) *
2108 qpp = 1 << ((t4_read_reg(adapter,
2109 A_SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp)
2110 & M_QUEUESPERPAGEPF0);
2111 num_seg = CXGBE_PAGE_SIZE / UDBS_SEG_SIZE;
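/* num_seg is the number of doorbell segments per page (e.g. 32 for a 4KB
 * page with 128B segments -- illustrative); the warning below fires when
 * more egress queues than that would share a page.
 */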
2113 dev_warn(adapter, "Incorrect SGE EGRESS QUEUES_PER_PAGE configuration, continuing in debug mode\n");
2115 adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr;
2116 if (!adapter->bar2) {
2117 dev_err(adapter, "cannot map device bar2 region\n");
2121 t4_write_reg(adapter, A_SGE_STAT_CFG, V_STATSOURCE_T5(7) |
2125 for_each_port(adapter, i) {
2126 const unsigned int numa_node = rte_socket_id();
2127 char name[RTE_ETH_NAME_MAX_LEN];
2128 struct rte_eth_dev *eth_dev;
2130 snprintf(name, sizeof(name), "%s_%d",
2131 adapter->pdev->device.name, i);
2134 /* First port is already allocated by DPDK */
2135 eth_dev = adapter->eth_dev;
2140 * now do all data allocation - for eth_dev structure,
2141 * and internal (private) data for the remaining ports
2144 /* reserve an ethdev entry */
2145 eth_dev = rte_eth_dev_allocate(name);
2149 eth_dev->data->dev_private =
2150 rte_zmalloc_socket(name, sizeof(struct port_info),
2151 RTE_CACHE_LINE_SIZE, numa_node);
2152 if (!eth_dev->data->dev_private)
2156 pi = eth_dev->data->dev_private;
2157 adapter->port[i] = pi;
2158 pi->eth_dev = eth_dev;
2159 pi->adapter = adapter;
2160 pi->xact_addr_filt = -1;
2164 pi->eth_dev->device = &adapter->pdev->device;
2165 pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
2166 pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
2167 pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;
2169 rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);
2171 pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
2172 RTE_ETHER_ADDR_LEN, 0);
2173 if (!pi->eth_dev->data->mac_addrs) {
2174 dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n",
2181 /* First port will be notified by upper layer */
2182 rte_eth_dev_probing_finish(eth_dev);
2186 if (adapter->flags & FW_OK) {
2187 err = t4_port_init(adapter, adapter->mbox, adapter->pf, 0);
2189 dev_err(adapter, "%s: t4_port_init failed with err %d\n",
2195 err = cxgbe_cfg_queues(adapter->eth_dev);
2199 cxgbe_print_adapter_info(adapter);
2200 cxgbe_print_port_info(adapter);
2202 adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
2203 adapter->clipt_end);
2204 if (!adapter->clipt) {
2205 /* We tolerate a lack of clip_table, giving up some
2208 dev_warn(adapter, "could not allocate CLIP. Continuing\n");
2211 adap_smt_index(adapter, &smt_start_idx, &smt_size);
2212 adapter->smt = t4_init_smt(smt_start_idx, smt_size);
2214 dev_warn(adapter, "could not allocate SMT, continuing\n");
2216 adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
2217 if (!adapter->l2t) {
2218 /* We tolerate a lack of L2T, giving up some functionality */
2219 dev_warn(adapter, "could not allocate L2T. Continuing\n");
2222 if (tid_init(&adapter->tids) < 0) {
2223 /* Disable filtering support */
2224 dev_warn(adapter, "could not allocate TID table, "
2225 "filter support disabled. Continuing\n");
2228 t4_os_lock_init(&adapter->flow_lock);
2230 adapter->mpstcam = t4_init_mpstcam(adapter);
2231 if (!adapter->mpstcam)
2232 dev_warn(adapter, "could not allocate mps tcam table."
2235 if (is_hashfilter(adapter)) {
2236 if (t4_read_reg(adapter, A_LE_DB_CONFIG) & F_HASHEN) {
2237 u32 hash_base, hash_reg;
2239 hash_reg = A_LE_DB_TID_HASHBASE;
2240 hash_base = t4_read_reg(adapter, hash_reg);
2241 adapter->tids.hash_base = hash_base / 4;
2244 /* Disable hash filtering support */
2246 "Maskless filter support disabled. Continuing\n");
2249 err = cxgbe_init_rss(adapter);
2256 cxgbe_cfg_queues_free(adapter);
2258 for_each_port(adapter, i) {
2259 pi = adap2pinfo(adapter, i);
2261 t4_free_vi(adapter, adapter->mbox, adapter->pf,
2263 rte_eth_dev_release_port(pi->eth_dev);
2266 if (adapter->flags & FW_OK)
2267 t4_fw_bye(adapter, adapter->mbox);