X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fcxgbe%2Fcxgbe_main.c;h=f1ac322709619a6e552f4f9753fb90a8682f0beb;hb=9fda31c3229ca6e036cae80392578ed6e5a51119;hp=a541d95ccf3daeac665249df6667cd312b4282bc;hpb=536db938a444755b09324d48a4291591a1be31a6;p=dpdk.git

diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index a541d95ccf..f1ac322709 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -20,15 +20,14 @@
 #include <rte_debug.h>
 #include <rte_pci.h>
 #include <rte_bus_pci.h>
-#include <rte_atomic.h>
 #include <rte_branch_prediction.h>
 #include <rte_memory.h>
 #include <rte_tailq.h>
 #include <rte_eal.h>
 #include <rte_alarm.h>
 #include <rte_ether.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
 #include <rte_malloc.h>
 #include <rte_random.h>
 #include <rte_dev.h>
@@ -417,13 +416,15 @@ void cxgbe_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
 
 	if (t->tid_tab[tid]) {
 		t->tid_tab[tid] = NULL;
-		rte_atomic32_dec(&t->conns_in_use);
+		__atomic_sub_fetch(&t->conns_in_use, 1, __ATOMIC_RELAXED);
 		if (t->hash_base && tid >= t->hash_base) {
 			if (family == FILTER_TYPE_IPV4)
-				rte_atomic32_dec(&t->hash_tids_in_use);
+				__atomic_sub_fetch(&t->hash_tids_in_use, 1,
+						   __ATOMIC_RELAXED);
 		} else {
 			if (family == FILTER_TYPE_IPV4)
-				rte_atomic32_dec(&t->tids_in_use);
+				__atomic_sub_fetch(&t->tids_in_use, 1,
+						   __ATOMIC_RELAXED);
 		}
 	}
 
@@ -445,13 +446,15 @@ void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
 	t->tid_tab[tid] = data;
 	if (t->hash_base && tid >= t->hash_base) {
 		if (family == FILTER_TYPE_IPV4)
-			rte_atomic32_inc(&t->hash_tids_in_use);
+			__atomic_add_fetch(&t->hash_tids_in_use, 1,
+					   __ATOMIC_RELAXED);
 	} else {
 		if (family == FILTER_TYPE_IPV4)
-			rte_atomic32_inc(&t->tids_in_use);
+			__atomic_add_fetch(&t->tids_in_use, 1,
+					   __ATOMIC_RELAXED);
 	}
 
-	rte_atomic32_inc(&t->conns_in_use);
+	__atomic_add_fetch(&t->conns_in_use, 1, __ATOMIC_RELAXED);
 }
 
 /**
@@ -504,10 +507,8 @@ static int tid_init(struct tid_info *t)
 
 	t->afree = NULL;
 	t->atids_in_use = 0;
-	rte_atomic32_init(&t->tids_in_use);
-	rte_atomic32_set(&t->tids_in_use, 0);
-	rte_atomic32_init(&t->conns_in_use);
-	rte_atomic32_set(&t->conns_in_use, 0);
+	t->tids_in_use = 0;
+	t->conns_in_use = 0;
 
	/* Setup the free list for atid_tab and clear the stid bitmap. */
 	if (natids) {
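The hunks above convert DPDK's rte_atomic32_t counters into plain integers updated with GCC __atomic built-ins. A minimal standalone sketch of that pattern (counter name is hypothetical, not driver code) — relaxed ordering is enough here because the counters are statistics, not synchronization points:

```c
#include <stdio.h>

static unsigned int conns_in_use;	/* was an rte_atomic32_t */

static void conn_get(void)
{
	/* equivalent of rte_atomic32_inc() after the conversion */
	__atomic_add_fetch(&conns_in_use, 1, __ATOMIC_RELAXED);
}

static void conn_put(void)
{
	/* equivalent of rte_atomic32_dec() after the conversion */
	__atomic_sub_fetch(&conns_in_use, 1, __ATOMIC_RELAXED);
}

int main(void)
{
	conn_get();
	conn_put();
	printf("%u\n", __atomic_load_n(&conns_in_use, __ATOMIC_RELAXED));
	return 0;
}
```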
@@ -526,22 +527,6 @@ static int tid_init(struct tid_info *t)
 	return 0;
 }
 
-static inline bool is_x_1g_port(const struct link_config *lc)
-{
-	return (lc->pcaps & FW_PORT_CAP32_SPEED_1G) != 0;
-}
-
-static inline bool is_x_10g_port(const struct link_config *lc)
-{
-	unsigned int speeds, high_speeds;
-
-	speeds = V_FW_PORT_CAP32_SPEED(G_FW_PORT_CAP32_SPEED(lc->pcaps));
-	high_speeds = speeds &
-		      ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);
-
-	return high_speeds != 0;
-}
-
 static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
 			     unsigned int us, unsigned int cnt,
 			     unsigned int size, unsigned int iqe_size)
@@ -554,20 +539,35 @@ static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
 
 int cxgbe_cfg_queue_count(struct rte_eth_dev *eth_dev)
 {
-	struct port_info *pi = eth_dev->data->dev_private;
+	struct port_info *temp_pi, *pi = eth_dev->data->dev_private;
 	struct adapter *adap = pi->adapter;
+	u16 first_txq = 0, first_rxq = 0;
 	struct sge *s = &adap->sge;
-	unsigned int max_queues = s->max_ethqsets / adap->params.nports;
+	u16 i, max_rxqs, max_txqs;
+
+	max_rxqs = s->max_ethqsets;
+	max_txqs = s->max_ethqsets;
+	for_each_port(adap, i) {
+		temp_pi = adap2pinfo(adap, i);
+		if (i == pi->port_id)
+			break;
+
+		if (max_rxqs <= temp_pi->n_rx_qsets ||
+		    max_txqs <= temp_pi->n_tx_qsets)
+			return -ENOMEM;
+
+		first_rxq += temp_pi->n_rx_qsets;
+		first_txq += temp_pi->n_tx_qsets;
+		max_rxqs -= temp_pi->n_rx_qsets;
+		max_txqs -= temp_pi->n_tx_qsets;
+	}
 
 	if ((eth_dev->data->nb_rx_queues < 1) ||
 	    (eth_dev->data->nb_tx_queues < 1))
 		return -EINVAL;
 
-	if ((eth_dev->data->nb_rx_queues > max_queues) ||
-	    (eth_dev->data->nb_tx_queues > max_queues))
-		return -EINVAL;
-
-	if (eth_dev->data->nb_rx_queues > pi->rss_size)
+	if (eth_dev->data->nb_rx_queues > max_rxqs ||
+	    eth_dev->data->nb_tx_queues > max_txqs)
 		return -EINVAL;
 
 	/* We must configure RSS, since config has changed*/
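The reworked cxgbe_cfg_queue_count() above carves queue sets out of one shared pool in port order: a port's first queue index is the sum of the queue counts of the lower-numbered ports, and whatever remains of the pool bounds its own request. A hypothetical standalone model of that arithmetic (demo_port and first_rxq_of are illustrative names, not driver symbols):

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct demo_port { uint16_t n_rxq; };

static int first_rxq_of(const struct demo_port *ports, uint16_t nports,
			uint16_t port_id, uint16_t pool, uint16_t *first)
{
	uint16_t i, off = 0;

	for (i = 0; i < port_id && i < nports; i++) {
		if (pool <= ports[i].n_rxq)
			return -ENOMEM;	/* earlier ports exhausted the pool */
		off += ports[i].n_rxq;	/* running first-queue offset */
		pool -= ports[i].n_rxq;	/* capacity left for later ports */
	}
	*first = off;
	return 0;
}

int main(void)
{
	struct demo_port ports[] = { { 4 }, { 2 }, { 2 } };
	uint16_t first;

	if (!first_rxq_of(ports, 3, 2, 16, &first))
		printf("port 2 starts at qset %u\n", first);	/* 6 */
	return 0;
}
```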
@@ -575,68 +575,66 @@ int cxgbe_cfg_queue_count(struct rte_eth_dev *eth_dev)
 
 	pi->n_rx_qsets = eth_dev->data->nb_rx_queues;
 	pi->n_tx_qsets = eth_dev->data->nb_tx_queues;
+	pi->first_rxqset = first_rxq;
+	pi->first_txqset = first_txq;
 
 	return 0;
 }
 
-void cxgbe_cfg_queues(struct rte_eth_dev *eth_dev)
+void cxgbe_cfg_queues_free(struct adapter *adap)
+{
+	if (adap->sge.ethtxq) {
+		rte_free(adap->sge.ethtxq);
+		adap->sge.ethtxq = NULL;
+	}
+
+	if (adap->sge.ethrxq) {
+		rte_free(adap->sge.ethrxq);
+		adap->sge.ethrxq = NULL;
+	}
+
+	adap->flags &= ~CFG_QUEUES;
+}
+
+int cxgbe_cfg_queues(struct rte_eth_dev *eth_dev)
 {
 	struct port_info *pi = eth_dev->data->dev_private;
 	struct adapter *adap = pi->adapter;
 	struct sge *s = &adap->sge;
-	unsigned int i, nb_ports = 0, qidx = 0;
-	unsigned int q_per_port = 0;
+	u16 i;
 
 	if (!(adap->flags & CFG_QUEUES)) {
-		for_each_port(adap, i) {
-			struct port_info *tpi = adap2pinfo(adap, i);
-
-			nb_ports += (is_x_10g_port(&tpi->link_cfg)) ||
-				     is_x_1g_port(&tpi->link_cfg) ? 1 : 0;
-		}
-
-		/*
-		 * We default up to # of cores queues per 1G/10G port.
-		 */
-		if (nb_ports)
-			q_per_port = (s->max_ethqsets -
-				      (adap->params.nports - nb_ports)) /
-				     nb_ports;
-
-		if (q_per_port > rte_lcore_count())
-			q_per_port = rte_lcore_count();
-
-		for_each_port(adap, i) {
-			struct port_info *pi = adap2pinfo(adap, i);
-
-			pi->first_qset = qidx;
-
-			/* Initially n_rx_qsets == n_tx_qsets */
-			pi->n_rx_qsets = (is_x_10g_port(&pi->link_cfg) ||
-					  is_x_1g_port(&pi->link_cfg)) ?
-					 q_per_port : 1;
-			pi->n_tx_qsets = pi->n_rx_qsets;
-
-			if (pi->n_rx_qsets > pi->rss_size)
-				pi->n_rx_qsets = pi->rss_size;
+		s->ethrxq = rte_calloc_socket(NULL, s->max_ethqsets,
+					      sizeof(struct sge_eth_rxq), 0,
+					      rte_socket_id());
+		if (!s->ethrxq)
+			return -ENOMEM;
 
-			qidx += pi->n_rx_qsets;
+		s->ethtxq = rte_calloc_socket(NULL, s->max_ethqsets,
+					      sizeof(struct sge_eth_txq), 0,
+					      rte_socket_id());
+		if (!s->ethtxq) {
+			rte_free(s->ethrxq);
+			s->ethrxq = NULL;
+			return -ENOMEM;
 		}
 
-		for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
+		for (i = 0; i < s->max_ethqsets; i++) {
 			struct sge_eth_rxq *r = &s->ethrxq[i];
+			struct sge_eth_txq *t = &s->ethtxq[i];
 
 			init_rspq(adap, &r->rspq, 5, 32, 1024, 64);
 			r->usembufs = 1;
 			r->fl.size = (r->usembufs ? 1024 : 72);
-		}
 
-		for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
-			s->ethtxq[i].q.size = 1024;
+			t->q.size = 1024;
+		}
 
 		init_rspq(adap, &adap->sge.fw_evtq, 0, 0, 1024, 64);
 		adap->flags |= CFG_QUEUES;
 	}
+
+	return 0;
 }
 
 void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats)
@@ -735,7 +733,7 @@ void cxgbe_print_port_info(struct adapter *adap)
 			--bufp;
 		sprintf(bufp, "BASE-%s",
 			t4_get_port_type_description(
-				(enum fw_port_type)pi->port_type));
+				(enum fw_port_type)pi->link_cfg.port_type));
 
 		dev_info(adap,
			 " " PCI_PRI_FMT " Chelsio rev %d %s %s\n",
@@ -1043,34 +1041,31 @@ static void configure_pcie_ext_tag(struct adapter *adapter)
 /* Figure out how many Queue Sets we can support */
 void cxgbe_configure_max_ethqsets(struct adapter *adapter)
 {
-	unsigned int ethqsets;
+	unsigned int ethqsets, reserved;
 
-	/*
-	 * We need to reserve an Ingress Queue for the Asynchronous Firmware
-	 * Event Queue.
+	/* We need to reserve an Ingress Queue for the Asynchronous Firmware
+	 * Event Queue and 1 Control Queue per port.
 	 *
 	 * For each Queue Set, we'll need the ability to allocate two Egress
 	 * Contexts -- one for the Ingress Queue Free List and one for the TX
 	 * Ethernet Queue.
 	 */
+	reserved = max(adapter->params.nports, 1);
 	if (is_pf4(adapter)) {
 		struct pf_resources *pfres = &adapter->params.pfres;
 
-		ethqsets = pfres->niqflint - 1;
-		if (pfres->neq < ethqsets * 2)
+		ethqsets = min(pfres->niqflint, pfres->nethctrl);
+		if (ethqsets > (pfres->neq / 2))
 			ethqsets = pfres->neq / 2;
 	} else {
 		struct vf_resources *vfres = &adapter->params.vfres;
 
-		ethqsets = vfres->niqflint - 1;
-		if (vfres->nethctrl != ethqsets)
-			ethqsets = min(vfres->nethctrl, ethqsets);
-		if (vfres->neq < ethqsets * 2)
+		ethqsets = min(vfres->niqflint, vfres->nethctrl);
+		if (ethqsets > (vfres->neq / 2))
 			ethqsets = vfres->neq / 2;
 	}
 
-	if (ethqsets > MAX_ETH_QSETS)
-		ethqsets = MAX_ETH_QSETS;
+	ethqsets -= reserved;
 	adapter->sge.max_ethqsets = ethqsets;
 }
@@ -1126,13 +1121,12 @@ static int adap_init0_tweaks(struct adapter *adapter)
  */
 static int adap_init0_config(struct adapter *adapter, int reset)
 {
+	u32 finiver, finicsum, cfcsum, param, val;
 	struct fw_caps_config_cmd caps_cmd;
 	unsigned long mtype = 0, maddr = 0;
-	u32 finiver, finicsum, cfcsum;
-	int ret;
-	int config_issued = 0;
-	int cfg_addr;
+	u8 config_issued = 0;
 	char config_name[20];
+	int cfg_addr, ret;
 
 	/*
 	 * Reset device if necessary.
 	 */
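The new budget in cxgbe_configure_max_ethqsets() above is the smaller of the ingress (niqflint) and control (nethctrl) resources, capped at neq/2 because each queue set consumes two egress contexts (free list plus TX queue), minus one reserved set per port. A worked model of just that arithmetic, as a sketch with made-up example numbers:

```c
#include <stdio.h>

static unsigned int max_ethqsets(unsigned int niqflint, unsigned int nethctrl,
				 unsigned int neq, unsigned int nports)
{
	unsigned int ethqsets = niqflint < nethctrl ? niqflint : nethctrl;
	unsigned int reserved = nports ? nports : 1;

	if (ethqsets > neq / 2)
		ethqsets = neq / 2;	/* 2 egress contexts per queue set */
	return ethqsets - reserved;
}

int main(void)
{
	/* e.g. niqflint=66, nethctrl=64, neq=128, 2 ports -> 62 qsets */
	printf("%u\n", max_ethqsets(66, 64, 128, 2));
	return 0;
}
```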
@@ -1159,6 +1153,12 @@ static int adap_init0_config(struct adapter *adapter, int reset)
 		mtype = FW_MEMTYPE_CF_FLASH;
 		maddr = cfg_addr;
 
+	/* Enable HASH filter region when support is available. */
+	val = 1;
+	param = CXGBE_FW_PARAM_DEV(HASHFILTER_WITH_OFLD);
+	t4_set_params(adapter, adapter->mbox, adapter->pf, 0, 1,
+		      &param, &val);
+
 	/*
 	 * Issue a Capability Configuration command to the firmware to get it
 	 * to parse the Configuration File. We don't use t4_fw_config_file()
@@ -1217,6 +1217,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
 	caps_cmd.iscsicaps = 0;
 	caps_cmd.rdmacaps = 0;
 	caps_cmd.fcoecaps = 0;
+	caps_cmd.cryptocaps = 0;
 
 	/*
 	 * And now tell the firmware to use the configuration we just loaded.
@@ -1496,6 +1497,24 @@ static int adap_init0(struct adapter *adap)
 	else
 		adap->params.max_tx_coalesce_num = ETH_COALESCE_PKT_NUM;
 
+	params[0] = CXGBE_FW_PARAM_DEV(VI_ENABLE_INGRESS_AFTER_LINKUP);
+	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
+	adap->params.vi_enable_rx = (ret == 0 && val[0] != 0);
+
+	/* Read the RAW MPS entries. In T6, the last 2 TCAM entries
+	 * are reserved for RAW MAC addresses (rawf = 2, one per port).
+	 */
+	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
+		params[0] = CXGBE_FW_PARAM_PFVF(RAWF_START);
+		params[1] = CXGBE_FW_PARAM_PFVF(RAWF_END);
+		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
+				      params, val);
+		if (ret == 0) {
+			adap->params.rawf_start = val[0];
+			adap->params.rawf_size = val[1] - val[0] + 1;
+		}
+	}
+
 	/*
 	 * The MTU/MSS Table is initialized by now, so load their values. If
 	 * we're initializing the adapter, then we'll make any modifications
@@ -1575,23 +1594,50 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id)
 
 	const struct port_info *pi = adap2pinfo(adap, port_id);
 
-	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
+	if (pi->link_cfg.mod_type == FW_PORT_MOD_TYPE_NONE)
 		dev_info(adap, "Port%d: port module unplugged\n", pi->port_id);
-	else if (pi->mod_type < ARRAY_SIZE(mod_str))
+	else if (pi->link_cfg.mod_type < ARRAY_SIZE(mod_str))
 		dev_info(adap, "Port%d: %s port module inserted\n", pi->port_id,
-			 mod_str[pi->mod_type]);
-	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
+			 mod_str[pi->link_cfg.mod_type]);
+	else if (pi->link_cfg.mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
 		dev_info(adap, "Port%d: unsupported port module inserted\n",
 			 pi->port_id);
-	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
+	else if (pi->link_cfg.mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
 		dev_info(adap, "Port%d: unknown port module inserted\n",
 			 pi->port_id);
-	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
+	else if (pi->link_cfg.mod_type == FW_PORT_MOD_TYPE_ERROR)
 		dev_info(adap, "Port%d: transceiver module error\n",
 			 pi->port_id);
 	else
 		dev_info(adap, "Port%d: unknown module type %d inserted\n",
-			 pi->port_id, pi->mod_type);
+			 pi->port_id, pi->link_cfg.mod_type);
+}
+
+void t4_os_link_changed(struct adapter *adap, int port_id)
+{
+	struct port_info *pi = adap2pinfo(adap, port_id);
+
+	/* If link status has not changed or if firmware doesn't
+	 * support enabling/disabling VI's Rx path during runtime,
+	 * then return.
+	 */
+	if (adap->params.vi_enable_rx == 0 ||
+	    pi->vi_en_rx == pi->link_cfg.link_ok)
+		return;
+
+	/* Don't enable VI Rx path, if link has been administratively
+	 * turned off.
+	 */
+	if (pi->vi_en_tx == 0 && pi->vi_en_rx == 0)
+		return;
+
+	/* When link goes down, disable the port's Rx path to drop
+	 * Rx traffic closer to the wire, instead of processing it
+	 * further in the Rx pipeline. The Rx path will be re-enabled
+	 * once the link up message comes in firmware event queue.
+	 */
+	pi->vi_en_rx = pi->link_cfg.link_ok;
+	t4_enable_vi(adap, adap->mbox, pi->viid, pi->vi_en_rx, pi->vi_en_tx);
 }
 
 bool cxgbe_force_linkup(struct adapter *adap)
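The new t4_os_link_changed() above only toggles the VI Rx path when the firmware supports runtime toggling, the Rx state actually disagrees with the link state, and the port has not been administratively downed. A toy model of that gating logic (demo_pi and link_changed are illustrative stand-ins, and the real function issues t4_enable_vi() instead of returning a flag):

```c
#include <stdbool.h>
#include <stdio.h>

struct demo_pi {
	bool vi_en_rx, vi_en_tx, link_ok;
};

static bool link_changed(struct demo_pi *pi, bool fw_vi_enable_rx)
{
	if (!fw_vi_enable_rx || pi->vi_en_rx == pi->link_ok)
		return false;		/* unsupported, or nothing to do */
	if (!pi->vi_en_tx && !pi->vi_en_rx)
		return false;		/* admin down: leave Rx off */
	pi->vi_en_rx = pi->link_ok;	/* would call t4_enable_vi() here */
	return true;
}

int main(void)
{
	struct demo_pi pi = { .vi_en_rx = false, .vi_en_tx = true,
			      .link_ok = true };

	printf("toggled=%d rx=%d\n", link_changed(&pi, true), pi.vi_en_rx);
	return 0;
}
```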
+ */ + pi->vi_en_rx = pi->link_cfg.link_ok; + t4_enable_vi(adap, adap->mbox, pi->viid, pi->vi_en_rx, pi->vi_en_tx); } bool cxgbe_force_linkup(struct adapter *adap) @@ -1615,8 +1661,7 @@ int cxgbe_link_start(struct port_info *pi) unsigned int mtu; int ret; - mtu = pi->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len - - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN); + mtu = pi->eth_dev->data->mtu; conf_offloads = pi->eth_dev->data->dev_conf.rxmode.offloads; @@ -1625,7 +1670,7 @@ int cxgbe_link_start(struct port_info *pi) * that step explicitly. */ ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1, -1, - !!(conf_offloads & DEV_RX_OFFLOAD_VLAN_STRIP), + !!(conf_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP), true); if (ret == 0) { ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt, @@ -1636,22 +1681,20 @@ int cxgbe_link_start(struct port_info *pi) } } if (ret == 0 && is_pf4(adapter)) - ret = t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan, - &pi->link_cfg); + ret = t4_link_l1cfg(pi, pi->link_cfg.admin_caps); if (ret == 0) { - /* - * Enabling a Virtual Interface can result in an interrupt - * during the processing of the VI Enable command and, in some - * paths, result in an attempt to issue another command in the - * interrupt context. Thus, we disable interrupts during the - * course of the VI Enable command ... + /* Disable VI Rx until link up message is received in + * firmware event queue, if firmware supports enabling/ + * disabling VI Rx at runtime. */ + pi->vi_en_rx = adapter->params.vi_enable_rx ? 0 : 1; + pi->vi_en_tx = 1; ret = t4_enable_vi_params(adapter, adapter->mbox, pi->viid, - true, true, false); + pi->vi_en_rx, pi->vi_en_tx, false); } if (ret == 0 && cxgbe_force_linkup(adapter)) - pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP; + pi->eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP; return ret; } @@ -1682,10 +1725,10 @@ int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf) if (rss_hf & CXGBE_RSS_HF_IPV4_MASK) flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN; - if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) + if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; - if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) + if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN; @@ -1701,7 +1744,7 @@ int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf) F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN; - rxq = &adapter->sge.ethrxq[pi->first_qset]; + rxq = &adapter->sge.ethrxq[pi->first_rxqset]; rss = rxq[0].rspq.abs_id; /* If Tunnel All Lookup isn't specified in the global RSS @@ -1732,7 +1775,7 @@ int cxgbe_write_rss(const struct port_info *pi, const u16 *queues) /* Should never be called before setting up sge eth rx queues */ BUG_ON(!(adapter->flags & FULL_INIT_DONE)); - rxq = &adapter->sge.ethrxq[pi->first_qset]; + rxq = &adapter->sge.ethrxq[pi->first_rxqset]; rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0); if (!rss) return -ENOMEM; @@ -1804,7 +1847,7 @@ void cxgbe_enable_rx_queues(struct port_info *pi) unsigned int i; for (i = 0; i < pi->n_rx_qsets; i++) - enable_rx(adap, &s->ethrxq[pi->first_qset + i].rspq); + enable_rx(adap, &s->ethrxq[pi->first_rxqset + i].rspq); } /** @@ -1822,7 +1865,7 @@ static void fw_caps_to_speed_caps(enum fw_port_type port_type, { #define SET_SPEED(__speed_name) \ do { \ - *speed_caps |= ETH_LINK_ ## __speed_name; \ + *speed_caps |= RTE_ETH_LINK_ ## __speed_name; \ } while (0) #define 
@@ -1905,11 +1948,11 @@ void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
 {
 	*speed_caps = 0;
 
-	fw_caps_to_speed_caps(pi->port_type, pi->link_cfg.pcaps,
+	fw_caps_to_speed_caps(pi->link_cfg.port_type, pi->link_cfg.pcaps,
 			      speed_caps);
 
 	if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG))
-		*speed_caps |= ETH_LINK_SPEED_FIXED;
+		*speed_caps |= RTE_ETH_LINK_SPEED_FIXED;
 }
 
 /**
@@ -1924,7 +1967,13 @@ int cxgbe_set_link_status(struct port_info *pi, bool status)
 	struct adapter *adapter = pi->adapter;
 	int err = 0;
 
-	err = t4_enable_vi(adapter, adapter->mbox, pi->viid, status, status);
+	/* Wait for link up message from firmware to enable Rx path,
+	 * if firmware supports enabling/disabling VI Rx at runtime.
+	 */
+	pi->vi_en_rx = adapter->params.vi_enable_rx ? 0 : status;
+	pi->vi_en_tx = status;
+	err = t4_enable_vi(adapter, adapter->mbox, pi->viid, pi->vi_en_rx,
+			   pi->vi_en_tx);
 	if (err) {
 		dev_err(adapter, "%s: disable_vi failed: %d\n", __func__, err);
 		return err;
@@ -1969,9 +2018,6 @@ int cxgbe_down(struct port_info *pi)
  */
 void cxgbe_close(struct adapter *adapter)
 {
-	struct port_info *pi;
-	int i;
-
 	if (adapter->flags & FULL_INIT_DONE) {
 		tid_free(&adapter->tids);
 		t4_cleanup_mpstcam(adapter);
@@ -1982,16 +2028,11 @@ void cxgbe_close(struct adapter *adapter)
 		t4_intr_disable(adapter);
 		t4_sge_tx_monitor_stop(adapter);
 		t4_free_sge_resources(adapter);
-		for_each_port(adapter, i) {
-			pi = adap2pinfo(adapter, i);
-			if (pi->viid != 0)
-				t4_free_vi(adapter, adapter->mbox,
-					   adapter->pf, 0, pi->viid);
-			rte_eth_dev_release_port(pi->eth_dev);
-		}
 		adapter->flags &= ~FULL_INIT_DONE;
 	}
 
+	cxgbe_cfg_queues_free(adapter);
+
 	if (is_pf4(adapter) && (adapter->flags & FW_OK))
 		t4_fw_bye(adapter, adapter->mbox);
 }
@@ -2165,7 +2206,9 @@ allocate_mac:
 		}
 	}
 
-	cxgbe_cfg_queues(adapter->eth_dev);
+	err = cxgbe_cfg_queues(adapter->eth_dev);
+	if (err)
+		goto out_free;
 
 	cxgbe_print_adapter_info(adapter);
 	cxgbe_print_port_info(adapter);
@@ -2224,6 +2267,8 @@ allocate_mac:
 	return 0;
 
 out_free:
+	cxgbe_cfg_queues_free(adapter);
+
 	for_each_port(adapter, i) {
 		pi = adap2pinfo(adapter, i);
 		if (pi->viid != 0)
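The probe path above now checks cxgbe_cfg_queues() and unwinds through the out_free label, releasing the queue arrays before the per-port virtual interfaces. A toy sketch of that goto-unwind ordering, with calloc()/free() standing in for the driver's allocators (probe_demo and its fail_vi switch are hypothetical; the -ENOMEM value printed is 12 on Linux):

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int probe_demo(int fail_vi)
{
	int *queues, *vis = NULL;
	int err = 0;

	queues = calloc(8, sizeof(*queues));	/* like cxgbe_cfg_queues() */
	if (!queues)
		return -ENOMEM;

	if (!fail_vi)
		vis = calloc(2, sizeof(*vis));	/* like the per-port VIs */
	if (!vis) {
		err = -ENOMEM;
		goto out_free;	/* mirrors the out_free label above */
	}

	free(vis);
out_free:
	free(queues);	/* queue arrays released before returning */
	return err;
}

int main(void)
{
	printf("%d\n", probe_demo(1));	/* -ENOMEM from the unwind path */
	return 0;
}
```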