diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index 5416800..c3938e8 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -37,6 +37,7 @@
 #include "t4_regs.h"
 #include "t4_msg.h"
 #include "cxgbe.h"
+#include "clip_tbl.h"
 
 /**
  * Allocate a chunk of memory. The allocated memory is cleared.
@@ -86,6 +87,18 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                const struct cpl_fw6_msg *msg = (const void *)rsp;
 
                t4_handle_fw_rpl(q->adapter, msg->data);
+       } else if (opcode == CPL_ABORT_RPL_RSS) {
+               const struct cpl_abort_rpl_rss *p = (const void *)rsp;
+
+               hash_del_filter_rpl(q->adapter, p);
+       } else if (opcode == CPL_SET_TCB_RPL) {
+               const struct cpl_set_tcb_rpl *p = (const void *)rsp;
+
+               filter_rpl(q->adapter, p);
+       } else if (opcode == CPL_ACT_OPEN_RPL) {
+               const struct cpl_act_open_rpl *p = (const void *)rsp;
+
+               hash_filter_rpl(q->adapter, p);
        } else {
                dev_err(adapter, "unexpected CPL %#x on FW event queue\n",
                        opcode);
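
The new branches route filter-related CPL replies (set-TCB, active-open and abort) arriving on the firmware event queue to their handlers. As an editor's sketch (not part of this patch), such a handler typically signals a waiting thread through the t4_completion used by cxgbe_poll_for_completion() further down:

static void example_signal_waiter(struct t4_completion *c)
{
        /* Mark the completion done under its lock so a concurrent poller in
         * cxgbe_poll_for_completion() observes a consistent flag.
         */
        t4_os_lock(&c->lock);
        c->done = 1;
        t4_os_unlock(&c->lock);
}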
@@ -135,6 +148,38 @@ out:
        return err;
 }
 
+/**
+ * cxgbe_poll_for_completion: Poll rxq for completion
+ * @q: rxq to poll
+ * @us: microseconds to delay
+ * @cnt: number of times to poll
+ * @c: completion to check for 'done' status
+ *
+ * Polls the rxq for replies until the completion is done or the count
+ * expires.
+ */
+int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us,
+                             unsigned int cnt, struct t4_completion *c)
+{
+       unsigned int i;
+       unsigned int work_done, budget = 4;
+
+       if (!c)
+               return -EINVAL;
+
+       for (i = 0; i < cnt; i++) {
+               cxgbe_poll(q, NULL, budget, &work_done);
+               t4_os_lock(&c->lock);
+               if (c->done) {
+                       t4_os_unlock(&c->lock);
+                       return 0;
+               }
+               t4_os_unlock(&c->lock);
+               udelay(us);
+       }
+       return -ETIMEDOUT;
+}
+
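
A hypothetical caller (names assumed, not from this patch) would post a request whose reply lands on the firmware event queue and then poll, here every 100us and at most 1000 times (roughly 100ms), until the reply handler marks the completion done:

static int example_wait_for_reply(struct adapter *adap,
                                  struct t4_completion *done)
{
        /* fw_evtq is the firmware event queue created by setup_sge_fwevtq(). */
        return cxgbe_poll_for_completion(&adap->sge.fw_evtq, 100, 1000, done);
}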
 int setup_sge_fwevtq(struct adapter *adapter)
 {
        struct sge *s = &adapter->sge;
@@ -226,6 +271,102 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
        return 0;
 }
 
+/**
+ * Allocate an active-open TID and associate it with the supplied data.
+ */
+int cxgbe_alloc_atid(struct tid_info *t, void *data)
+{
+       int atid = -1;
+
+       t4_os_lock(&t->atid_lock);
+       if (t->afree) {
+               union aopen_entry *p = t->afree;
+
+               atid = p - t->atid_tab;
+               t->afree = p->next;
+               p->data = data;
+               t->atids_in_use++;
+       }
+       t4_os_unlock(&t->atid_lock);
+       return atid;
+}
+
+/**
+ * Release an active-open TID.
+ */
+void cxgbe_free_atid(struct tid_info *t, unsigned int atid)
+{
+       union aopen_entry *p = &t->atid_tab[atid];
+
+       t4_os_lock(&t->atid_lock);
+       p->next = t->afree;
+       t->afree = p;
+       t->atids_in_use--;
+       t4_os_unlock(&t->atid_lock);
+}
+
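
A sketch of the intended alloc/free pairing; example_send_act_open_req() is a hypothetical helper standing in for whatever builds and posts the active-open work request:

static int example_reserve_atid(struct adapter *adap, void *ctx)
{
        int atid = cxgbe_alloc_atid(&adap->tids, ctx);

        if (atid < 0)
                return -ENOMEM;         /* atid table exhausted */

        if (example_send_act_open_req(adap, atid, ctx) != 0) {
                /* Roll back the reservation if the request cannot be sent. */
                cxgbe_free_atid(&adap->tids, atid);
                return -EIO;
        }
        return atid;    /* normally freed when the open reply arrives */
}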
+/**
+ * Populate a TID_RELEASE WR.  Caller must properly size the mbuf.
+ */
+static void mk_tid_release(struct rte_mbuf *mbuf, unsigned int tid)
+{
+       struct cpl_tid_release *req;
+
+       req = rte_pktmbuf_mtod(mbuf, struct cpl_tid_release *);
+       INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
+}
+
+/**
+ * Release a TID and inform HW.  If we are unable to allocate the release
+ * message, the hardware release is skipped.
+ */
+void cxgbe_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
+                     unsigned short family)
+{
+       struct rte_mbuf *mbuf;
+       struct adapter *adap = container_of(t, struct adapter, tids);
+
+       WARN_ON(tid >= t->ntids);
+
+       if (t->tid_tab[tid]) {
+               t->tid_tab[tid] = NULL;
+               rte_atomic32_dec(&t->conns_in_use);
+               if (t->hash_base && tid >= t->hash_base) {
+                       if (family == FILTER_TYPE_IPV4)
+                               rte_atomic32_dec(&t->hash_tids_in_use);
+               } else {
+                       if (family == FILTER_TYPE_IPV4)
+                               rte_atomic32_dec(&t->tids_in_use);
+               }
+       }
+
+       mbuf = rte_pktmbuf_alloc((&adap->sge.ctrlq[chan])->mb_pool);
+       if (mbuf) {
+               mbuf->data_len = sizeof(struct cpl_tid_release);
+               mbuf->pkt_len = mbuf->data_len;
+               mk_tid_release(mbuf, tid);
+               t4_mgmt_tx(&adap->sge.ctrlq[chan], mbuf);
+       }
+}
+
+/**
+ * Insert a TID.
+ */
+void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
+                     unsigned short family)
+{
+       t->tid_tab[tid] = data;
+       if (t->hash_base && tid >= t->hash_base) {
+               if (family == FILTER_TYPE_IPV4)
+                       rte_atomic32_inc(&t->hash_tids_in_use);
+       } else {
+               if (family == FILTER_TYPE_IPV4)
+                       rte_atomic32_inc(&t->tids_in_use);
+       }
+
+       rte_atomic32_inc(&t->conns_in_use);
+}
+
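
A lifecycle sketch under assumed parameters: the connection context is recorded against its hardware TID once an open completes, and removed on teardown, which also posts the CPL_TID_RELEASE built by mk_tid_release() above:

static void example_tid_lifecycle(struct adapter *adap, void *conn_ctx,
                                  unsigned int tid, unsigned int chan)
{
        cxgbe_insert_tid(&adap->tids, conn_ctx, tid, FILTER_TYPE_IPV4);

        /* ... TID in use; conns_in_use and the per-family counters track it ... */

        cxgbe_remove_tid(&adap->tids, chan, tid, FILTER_TYPE_IPV4);
}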
 /**
  * Free TID tables.
  */
@@ -251,24 +392,43 @@ static int tid_init(struct tid_info *t)
 {
        size_t size;
        unsigned int ftid_bmap_size;
+       unsigned int natids = t->natids;
        unsigned int max_ftids = t->nftids;
 
        ftid_bmap_size = rte_bitmap_get_memory_footprint(t->nftids);
        size = t->ntids * sizeof(*t->tid_tab) +
-               max_ftids * sizeof(*t->ftid_tab);
+               max_ftids * sizeof(*t->ftid_tab) +
+               natids * sizeof(*t->atid_tab);
 
        t->tid_tab = t4_os_alloc(size);
        if (!t->tid_tab)
                return -ENOMEM;
 
-       t->ftid_tab = (struct filter_entry *)&t->tid_tab[t->ntids];
+       t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
+       t->ftid_tab = (struct filter_entry *)&t->atid_tab[t->natids];
        t->ftid_bmap_array = t4_os_alloc(ftid_bmap_size);
        if (!t->ftid_bmap_array) {
                tid_free(t);
                return -ENOMEM;
        }
 
+       t4_os_lock_init(&t->atid_lock);
        t4_os_lock_init(&t->ftid_lock);
+
+       t->afree = NULL;
+       t->atids_in_use = 0;
+       rte_atomic32_init(&t->tids_in_use);
+       rte_atomic32_set(&t->tids_in_use, 0);
+       rte_atomic32_init(&t->conns_in_use);
+       rte_atomic32_set(&t->conns_in_use, 0);
+
+       /* Set up the free list for atid_tab. */
+       if (natids) {
+               while (--natids)
+                       t->atid_tab[natids - 1].next = &t->atid_tab[natids];
+               t->afree = t->atid_tab;
+       }
+
        t->ftid_bmap = rte_bitmap_init(t->nftids, t->ftid_bmap_array,
                                       ftid_bmap_size);
        if (!t->ftid_bmap) {
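
The single allocation is carved into three tables by the pointer arithmetic above; an editor's diagram of the resulting layout:

/*
 * t->tid_tab  : ntids  entries of void *              (connection TIDs)
 * t->atid_tab : natids entries of union aopen_entry   (active-open TIDs)
 * t->ftid_tab : nftids entries of struct filter_entry (LE-TCAM filters)
 *
 * atid_tab starts at &tid_tab[ntids] and ftid_tab at &atid_tab[natids],
 * so one t4_os_alloc() call backs all three tables.
 */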
@@ -353,7 +513,7 @@ void cfg_queues(struct rte_eth_dev *eth_dev)
                 * We default up to # of cores queues per 1G/10G port.
                 */
                if (nb_ports)
-                       q_per_port = (MAX_ETH_QSETS -
+                       q_per_port = (s->max_ethqsets -
                                     (adap->params.nports - nb_ports)) /
                                     nb_ports;
 
@@ -377,8 +537,6 @@ void cfg_queues(struct rte_eth_dev *eth_dev)
                        qidx += pi->n_rx_qsets;
                }
 
-               s->max_ethqsets = qidx;
-
                for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
                        struct sge_eth_rxq *r = &s->ethrxq[i];
 
@@ -610,6 +768,40 @@ static void configure_pcie_ext_tag(struct adapter *adapter)
        }
 }
 
+/* Figure out how many Queue Sets we can support */
+void configure_max_ethqsets(struct adapter *adapter)
+{
+       unsigned int ethqsets;
+
+       /*
+        * We need to reserve an Ingress Queue for the Asynchronous Firmware
+        * Event Queue.
+        *
+        * For each Queue Set, we'll need the ability to allocate two Egress
+        * Contexts -- one for the Ingress Queue Free List and one for the TX
+        * Ethernet Queue.
+        */
+       if (is_pf4(adapter)) {
+               struct pf_resources *pfres = &adapter->params.pfres;
+
+               ethqsets = pfres->niqflint - 1;
+               if (pfres->neq < ethqsets * 2)
+                       ethqsets = pfres->neq / 2;
+       } else {
+               struct vf_resources *vfres = &adapter->params.vfres;
+
+               ethqsets = vfres->niqflint - 1;
+               if (vfres->nethctrl != ethqsets)
+                       ethqsets = min(vfres->nethctrl, ethqsets);
+               if (vfres->neq < ethqsets * 2)
+                       ethqsets = vfres->neq / 2;
+       }
+
+       if (ethqsets > MAX_ETH_QSETS)
+               ethqsets = MAX_ETH_QSETS;
+       adapter->sge.max_ethqsets = ethqsets;
+}
+
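
Worked example with assumed PF provisioning: niqflint = 17 and neq = 40 give ethqsets = 17 - 1 = 16; since neq = 40 >= 2 * 16, the egress-context check does not trim it, and 16 is within MAX_ETH_QSETS, so sge.max_ethqsets becomes 16. Had neq been only 20, the result would have dropped to 20 / 2 = 10.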
 /*
  * Tweak configuration based on system architecture, etc.  Most of these have
  * defaults assigned to them by Firmware Configuration Files (if we're using
@@ -748,8 +940,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
         * This will allow the firmware to optimize aspects of the hardware
         * configuration which will result in improved performance.
         */
-       caps_cmd.niccaps &= cpu_to_be16(~(FW_CAPS_CONFIG_NIC_HASHFILTER |
-                                         FW_CAPS_CONFIG_NIC_ETHOFLD));
+       caps_cmd.niccaps &= cpu_to_be16(~FW_CAPS_CONFIG_NIC_ETHOFLD);
        caps_cmd.toecaps = 0;
        caps_cmd.iscsicaps = 0;
        caps_cmd.rdmacaps = 0;
@@ -892,6 +1083,17 @@ static int adap_init0(struct adapter *adap)
                goto bye;
        }
 
+       /* Now that we've successfully configured and initialized the adapter
+        * (or found it already initialized), we can ask the Firmware what
+        * resources it has provisioned for us.
+        */
+       ret = t4_get_pfres(adap);
+       if (ret) {
+               dev_err(adap->pdev_dev,
+                       "Unable to retrieve resource provisioning info\n");
+               goto bye;
+       }
+
        /* Find out what ports are available to us. */
        v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
            V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
@@ -941,6 +1143,14 @@ static int adap_init0(struct adapter *adap)
        adap->tids.ftid_base = val[0];
        adap->tids.nftids = val[1] - val[0] + 1;
 
+       params[0] = FW_PARAM_PFVF(CLIP_START);
+       params[1] = FW_PARAM_PFVF(CLIP_END);
+       ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
+       if (ret < 0)
+               goto bye;
+       adap->clipt_start = val[0];
+       adap->clipt_end = val[1];
+
        /*
         * Get device capabilities so we can determine what resources we need
         * to manage.
@@ -954,6 +1164,12 @@ static int adap_init0(struct adapter *adap)
        if (ret < 0)
                goto bye;
 
+       if ((caps_cmd.niccaps & cpu_to_be16(FW_CAPS_CONFIG_NIC_HASHFILTER)) &&
+           is_t6(adap->params.chip)) {
+               if (init_hash_filter(adap) < 0)
+                       goto bye;
+       }
+
        /* query tid-related parameters */
        params[0] = FW_PARAM_DEV(NTID);
        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
@@ -961,6 +1177,7 @@ static int adap_init0(struct adapter *adap)
        if (ret < 0)
                goto bye;
        adap->tids.ntids = val[0];
+       adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
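
With assumed values, a firmware-reported ntids of 65536 and a MAX_ATIDS of 8192 would yield natids = min(65536 / 2, 8192) = 8192 active-open TIDs, which is the size used for atid_tab in tid_init().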
 
        /* If we're running on newer firmware, let it know that we're
         * prepared to deal with encapsulated CPL messages.  Older
@@ -1027,6 +1244,7 @@ static int adap_init0(struct adapter *adap)
        t4_init_tp_params(adap);
        configure_pcie_ext_tag(adap);
        configure_vlan_types(adap);
+       configure_max_ethqsets(adap);
 
        adap->params.drv_memwin = MEMWIN_NIC;
        adap->flags |= FW_OK;
@@ -1167,7 +1385,7 @@ int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
        if (rss_hf & ~CXGBE_RSS_HF_ALL)
                return -EINVAL;
 
-       if (rss_hf & ETH_RSS_IPV4)
+       if (rss_hf & CXGBE_RSS_HF_IPV4_MASK)
                flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
 
        if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
@@ -1177,14 +1395,16 @@ int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
                flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
                         F_FW_RSS_VI_CONFIG_CMD_UDPEN;
 
-       if (rss_hf & ETH_RSS_IPV6)
+       if (rss_hf & CXGBE_RSS_HF_IPV6_MASK)
                flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
 
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
-               flags |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
+       if (rss_hf & CXGBE_RSS_HF_TCP_IPV6_MASK)
+               flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
+                        F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
 
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
-               flags |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
+       if (rss_hf & CXGBE_RSS_HF_UDP_IPV6_MASK)
+               flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
+                        F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
                         F_FW_RSS_VI_CONFIG_CMD_UDPEN;
 
        rxq = &adapter->sge.ethrxq[pi->first_qset];
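
A hypothetical call into the updated RSS mapping, with an illustrative ETH_RSS_* combination; these bits set both the IPv4 and IPv6 two-tuple and four-tuple flags plus UDP hashing:

static int example_enable_rss(struct port_info *pi)
{
        uint64_t rss_hf = ETH_RSS_NONFRAG_IPV4_TCP |
                          ETH_RSS_NONFRAG_IPV4_UDP |
                          ETH_RSS_NONFRAG_IPV6_TCP |
                          ETH_RSS_NONFRAG_IPV6_UDP;

        return cxgbe_write_rss_conf(pi, rss_hf);
}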
@@ -1398,6 +1618,30 @@ void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
                *speed_caps |= ETH_LINK_SPEED_FIXED;
 }
 
+/**
+ * cxgbe_set_link_status - Set device link up or down.
+ * @pi: Underlying port's info
+ * @status: 0 - down, 1 - up
+ *
+ * Set the device link up or down.
+ */
+int cxgbe_set_link_status(struct port_info *pi, bool status)
+{
+       struct adapter *adapter = pi->adapter;
+       int err = 0;
+
+       err = t4_enable_vi(adapter, adapter->mbox, pi->viid, status, status);
+       if (err) {
+               dev_err(adapter, "%s: disable_vi failed: %d\n", __func__, err);
+               return err;
+       }
+
+       if (!status)
+               t4_reset_link_config(adapter, pi->pidx);
+
+       return 0;
+}
+
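
A sketch of how an ethdev set-link-down callback could sit on top of the new helper (the real callbacks are added in cxgbe_ethdev.c and are not part of this hunk):

static int example_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
        struct port_info *pi = eth_dev->data->dev_private;

        return cxgbe_set_link_status(pi, false);
}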
 /**
  * cxgb_up - enable the adapter
  * @adap: adapter being enabled
@@ -1423,17 +1667,7 @@ int cxgbe_up(struct adapter *adap)
  */
 int cxgbe_down(struct port_info *pi)
 {
-       struct adapter *adapter = pi->adapter;
-       int err = 0;
-
-       err = t4_enable_vi(adapter, adapter->mbox, pi->viid, false, false);
-       if (err) {
-               dev_err(adapter, "%s: disable_vi failed: %d\n", __func__, err);
-               return err;
-       }
-
-       t4_reset_link_config(adapter, pi->pidx);
-       return 0;
+       return cxgbe_set_link_status(pi, false);
 }
 
 /*
@@ -1448,6 +1682,7 @@ void cxgbe_close(struct adapter *adapter)
                if (is_pf4(adapter))
                        t4_intr_disable(adapter);
                tid_free(&adapter->tids);
+               t4_cleanup_clip_tbl(adapter);
                t4_sge_tx_monitor_stop(adapter);
                t4_free_sge_resources(adapter);
                for_each_port(adapter, i) {
@@ -1491,6 +1726,7 @@ int cxgbe_probe(struct adapter *adapter)
 
        t4_os_lock_init(&adapter->mbox_lock);
        TAILQ_INIT(&adapter->mbox_list);
+       t4_os_lock_init(&adapter->win0_lock);
 
        err = t4_prep_adapter(adapter);
        if (err)
@@ -1610,12 +1846,35 @@ allocate_mac:
        print_adapter_info(adapter);
        print_port_info(adapter);
 
+       adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
+                                         adapter->clipt_end);
+       if (!adapter->clipt) {
+               /* We tolerate a lack of clip_table, giving up some
+                * functionality
+                */
+               dev_warn(adapter, "could not allocate CLIP. Continuing\n");
+       }
+
        if (tid_init(&adapter->tids) < 0) {
                /* Disable filtering support */
                dev_warn(adapter, "could not allocate TID table, "
                         "filter support disabled. Continuing\n");
        }
 
+       if (is_hashfilter(adapter)) {
+               if (t4_read_reg(adapter, A_LE_DB_CONFIG) & F_HASHEN) {
+                       u32 hash_base, hash_reg;
+
+                       hash_reg = A_LE_DB_TID_HASHBASE;
+                       hash_base = t4_read_reg(adapter, hash_reg);
+                       adapter->tids.hash_base = hash_base / 4;
+               }
+       } else {
+               /* Disable hash filtering support */
+               dev_warn(adapter,
+                        "Maskless filter support disabled. Continuing\n");
+       }
+
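
An editor's helper sketch showing how the recorded hash_base separates hash-region TIDs, mirroring the 'tid >= t->hash_base' checks in cxgbe_insert_tid()/cxgbe_remove_tid() above:

static inline int example_is_hash_tid(const struct tid_info *t,
                                      unsigned int tid)
{
        /* A hash_base of 0 means the hash region was never enabled. */
        return t->hash_base != 0 && tid >= t->hash_base;
}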
        err = init_rss(adapter);
        if (err)
                goto out_free;