net/cxgbe: implement flow create operation
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index feb494b..a00e070 100644
@@ -1,34 +1,6 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2014-2017 Chelsio Communications.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Chelsio Communications nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
  */
 
 #include <sys/queue.h>
 #include <rte_ether.h>
 #include <rte_ethdev_driver.h>
 #include <rte_ethdev_pci.h>
-#include <rte_malloc.h>
 #include <rte_random.h>
 #include <rte_dev.h>
+#include <rte_kvargs.h>
 
 #include "common.h"
 #include "t4_regs.h"
 #include "t4_msg.h"
 #include "cxgbe.h"
 
+/**
+ * Allocate a chunk of memory. The allocated memory is cleared.
+ */
+void *t4_alloc_mem(size_t size)
+{
+       return rte_zmalloc(NULL, size, 0);
+}
+
+/**
+ * Free memory allocated through t4_alloc_mem().
+ */
+void t4_free_mem(void *addr)
+{
+       rte_free(addr);
+}
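Editorial note: these wrappers keep the shared Chelsio code off the DPDK allocator API. A minimal usage sketch (the scratch type is hypothetical, not from this patch):

        /* Editorial sketch only -- not part of the patch. */
        struct scratch_buf *buf = t4_alloc_mem(sizeof(*buf));  /* hypothetical type */
        if (!buf)
                return -ENOMEM;
        /* ... every field of buf starts out zeroed ... */
        t4_free_mem(buf);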
+
 /*
  * Response queue handler for the FW event queue.
  */
@@ -98,6 +86,10 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                const struct cpl_fw6_msg *msg = (const void *)rsp;
 
                t4_handle_fw_rpl(q->adapter, msg->data);
+       } else if (opcode == CPL_SET_TCB_RPL) {
+               const struct cpl_set_tcb_rpl *p = (const void *)rsp;
+
+               filter_rpl(q->adapter, p);
        } else {
                dev_err(adapter, "unexpected CPL %#x on FW event queue\n",
                        opcode);
@@ -106,6 +98,79 @@ out:
        return 0;
 }
 
+/**
+ * Set up the SGE control queues to pass control information.
+ */
+int setup_sge_ctrl_txq(struct adapter *adapter)
+{
+       struct sge *s = &adapter->sge;
+       int err = 0, i = 0;
+
+       for_each_port(adapter, i) {
+               char name[RTE_ETH_NAME_MAX_LEN];
+               struct sge_ctrl_txq *q = &s->ctrlq[i];
+
+               q->q.size = 1024;
+               err = t4_sge_alloc_ctrl_txq(adapter, q,
+                                           adapter->eth_dev,  i,
+                                           s->fw_evtq.cntxt_id,
+                                           rte_socket_id());
+               if (err) {
+                       dev_err(adapter, "Failed to alloc ctrl txq. Err: %d",
+                               err);
+                       goto out;
+               }
+               snprintf(name, sizeof(name), "cxgbe_ctrl_pool_%d", i);
+               q->mb_pool = rte_pktmbuf_pool_create(name, s->ctrlq[i].q.size,
+                                                    RTE_CACHE_LINE_SIZE,
+                                                    RTE_MBUF_PRIV_ALIGN,
+                                                    RTE_MBUF_DEFAULT_BUF_SIZE,
+                                                    SOCKET_ID_ANY);
+               if (!q->mb_pool) {
+                       dev_err(adapter, "Can't create ctrl pool for port: %d",
+                               i);
+                       err = -ENOMEM;
+                       goto out;
+               }
+       }
+       return 0;
+out:
+       t4_free_sge_resources(adapter);
+       return err;
+}
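Editorial note: a hedged sketch of the intended ordering. The control queues bind to s->fw_evtq.cntxt_id, so this routine must run after setup_sge_fwevtq(); the configure path shown below is an illustration, not part of this hunk.

        /* Editorial sketch only -- ordering illustration, not the patch. */
        int err;

        err = setup_sge_fwevtq(adapter);        /* provides s->fw_evtq.cntxt_id */
        if (err)
                return err;

        err = setup_sge_ctrl_txq(adapter);      /* one ctrl txq + mbuf pool per port */
        if (err)
                return err;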
+
+/**
+ * cxgbe_poll_for_completion: Poll rxq for completion
+ * @q: rxq to poll
+ * @us: microseconds to delay
+ * @cnt: number of times to poll
+ * @c: completion to check for 'done' status
+ *
+ * Polls the rxq for replies until the completion is marked done or the
+ * retry count expires.
+ */
+int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us,
+                             unsigned int cnt, struct t4_completion *c)
+{
+       unsigned int i;
+       unsigned int work_done, budget = 4;
+
+       if (!c)
+               return -EINVAL;
+
+       for (i = 0; i < cnt; i++) {
+               cxgbe_poll(q, NULL, budget, &work_done);
+               t4_os_lock(&c->lock);
+               if (c->done) {
+                       t4_os_unlock(&c->lock);
+                       return 0;
+               }
+               t4_os_unlock(&c->lock);
+               udelay(us);
+       }
+       return -ETIMEDOUT;
+}
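Editorial note: a hedged sketch of the calling pattern this helper supports. The producer posts a work request whose reply handler sets c->done under c->lock (as filter_rpl() does for CPL_SET_TCB_RPL), and the caller then polls the response queue until that flag appears. The helpers marked "hypothetical" below, and the delay/retry values, are illustrative assumptions, not part of this hunk.

        /* Editorial sketch only -- not part of the patch. */
        struct sge_rspq *rspq = &adapter->sge.fw_evtq;  /* assumed reply queue */
        int err;

        t4_init_completion(&f->comp);           /* hypothetical: done = 0, init lock */
        err = send_filter_wr(adapter, f);       /* hypothetical: posts the work request */
        if (err)
                return err;

        /* Poll every 10 us, at most 1000 times (~10 ms), for the reply
         * handler to mark the completion done.
         */
        err = cxgbe_poll_for_completion(rspq, 10, 1000, &f->comp);
        if (err == -ETIMEDOUT)
                dev_warn(adapter, "filter work request timed out\n");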
+
 int setup_sge_fwevtq(struct adapter *adapter)
 {
        struct sge *s = &adapter->sge;
@@ -197,6 +262,59 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
        return 0;
 }
 
+/**
+ * Free TID tables.
+ */
+static void tid_free(struct tid_info *t)
+{
+       if (t->tid_tab) {
+               if (t->ftid_bmap)
+                       rte_bitmap_free(t->ftid_bmap);
+
+               if (t->ftid_bmap_array)
+                       t4_os_free(t->ftid_bmap_array);
+
+               t4_os_free(t->tid_tab);
+       }
+
+       memset(t, 0, sizeof(struct tid_info));
+}
+
+/**
+ * Allocate and initialize the TID tables.  Returns 0 on success.
+ */
+static int tid_init(struct tid_info *t)
+{
+       size_t size;
+       unsigned int ftid_bmap_size;
+       unsigned int max_ftids = t->nftids;
+
+       ftid_bmap_size = rte_bitmap_get_memory_footprint(t->nftids);
+       size = t->ntids * sizeof(*t->tid_tab) +
+               max_ftids * sizeof(*t->ftid_tab);
+
+       t->tid_tab = t4_os_alloc(size);
+       if (!t->tid_tab)
+               return -ENOMEM;
+
+       t->ftid_tab = (struct filter_entry *)&t->tid_tab[t->ntids];
+       t->ftid_bmap_array = t4_os_alloc(ftid_bmap_size);
+       if (!t->ftid_bmap_array) {
+               tid_free(t);
+               return -ENOMEM;
+       }
+
+       t4_os_lock_init(&t->ftid_lock);
+       t->ftid_bmap = rte_bitmap_init(t->nftids, t->ftid_bmap_array,
+                                      ftid_bmap_size);
+       if (!t->ftid_bmap) {
+               tid_free(t);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
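Editorial note: tid_init() only builds the tables. For illustration, a hedged sketch of how a free filter TID could later be claimed from t->ftid_bmap under t->ftid_lock; the real allocator lives in the filter code and may differ.

        /* Editorial sketch only -- not part of the patch. */
        static int alloc_ftid_sketch(struct tid_info *t)
        {
                unsigned int i;
                int ftid = -1;

                t4_os_lock(&t->ftid_lock);
                for (i = 0; i < t->nftids; i++) {
                        if (!rte_bitmap_get(t->ftid_bmap, i)) {
                                rte_bitmap_set(t->ftid_bmap, i);  /* mark TID in use */
                                ftid = i;
                                break;
                        }
                }
                t4_os_unlock(&t->ftid_lock);
                return ftid;    /* index into t->ftid_tab, or -1 if exhausted */
        }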
+
 static inline bool is_x_1g_port(const struct link_config *lc)
 {
        return (lc->pcaps & FW_PORT_CAP32_SPEED_1G) != 0;
@@ -346,14 +464,17 @@ static void setup_memwin(struct adapter *adap)
                                        MEMWIN_NIC));
 }
 
-static int init_rss(struct adapter *adap)
+int init_rss(struct adapter *adap)
 {
        unsigned int i;
-       int err;
 
-       err = t4_init_rss_mode(adap, adap->mbox);
-       if (err)
-               return err;
+       if (is_pf4(adap)) {
+               int err;
+
+               err = t4_init_rss_mode(adap, adap->mbox);
+               if (err)
+                       return err;
+       }
 
        for_each_port(adap, i) {
                struct port_info *pi = adap2pinfo(adap, i);
@@ -417,6 +538,84 @@ void print_port_info(struct adapter *adap)
        }
 }
 
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+                     __rte_unused void *opaque)
+{
+       if (strcmp(value, "1"))
+               return -1;
+
+       return 0;
+}
+
+int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+       struct rte_kvargs *kvlist;
+
+       if (!devargs)
+               return 0;
+
+       kvlist = rte_kvargs_parse(devargs->args, NULL);
+       if (!kvlist)
+               return 0;
+
+       if (!rte_kvargs_count(kvlist, key)) {
+               rte_kvargs_free(kvlist);
+               return 0;
+       }
+
+       if (rte_kvargs_process(kvlist, key,
+                              check_devargs_handler, NULL) < 0) {
+               rte_kvargs_free(kvlist);
+               return 0;
+       }
+       rte_kvargs_free(kvlist);
+
+       return 1;
+}
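Editorial note: the helper returns 1 only when the key is present in the device arguments and set to "1". Assuming CXGBE_DEVARG_KEEP_OVLAN expands to a key such as "keep_ovlan" (an assumption, not confirmed by this hunk), the user would pass it per device on the application command line (e.g. a whitelist argument like 02:00.4,keep_ovlan=1) and the driver would consume it as sketched below.

        /* Editorial sketch only; the key spelling is an assumption. */
        if (cxgbe_get_devargs(adapter->pdev->device.devargs,
                              CXGBE_DEVARG_KEEP_OVLAN))
                dev_info(adapter, "keeping outer VLAN tags on ingress\n");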
+
+static void configure_vlan_types(struct adapter *adapter)
+{
+       struct rte_pci_device *pdev = adapter->pdev;
+       int i;
+
+       for_each_port(adapter, i) {
+               /* OVLAN Type 0x88a8 */
+               t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN0),
+                                V_OVLAN_MASK(M_OVLAN_MASK) |
+                                V_OVLAN_ETYPE(M_OVLAN_ETYPE),
+                                V_OVLAN_MASK(M_OVLAN_MASK) |
+                                V_OVLAN_ETYPE(0x88a8));
+               /* OVLAN Type 0x9100 */
+               t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN1),
+                                V_OVLAN_MASK(M_OVLAN_MASK) |
+                                V_OVLAN_ETYPE(M_OVLAN_ETYPE),
+                                V_OVLAN_MASK(M_OVLAN_MASK) |
+                                V_OVLAN_ETYPE(0x9100));
+               /* OVLAN Type 0x8100 */
+               t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN2),
+                                V_OVLAN_MASK(M_OVLAN_MASK) |
+                                V_OVLAN_ETYPE(M_OVLAN_ETYPE),
+                                V_OVLAN_MASK(M_OVLAN_MASK) |
+                                V_OVLAN_ETYPE(0x8100));
+
+               /* IVLAN 0X8100 */
+               t4_set_reg_field(adapter, MPS_PORT_RX_IVLAN(i),
+                                V_IVLAN_ETYPE(M_IVLAN_ETYPE),
+                                V_IVLAN_ETYPE(0x8100));
+
+               t4_set_reg_field(adapter, MPS_PORT_RX_CTL(i),
+                                F_OVLAN_EN0 | F_OVLAN_EN1 |
+                                F_OVLAN_EN2 | F_IVLAN_EN,
+                                F_OVLAN_EN0 | F_OVLAN_EN1 |
+                                F_OVLAN_EN2 | F_IVLAN_EN);
+       }
+
+       if (cxgbe_get_devargs(pdev->device.devargs, CXGBE_DEVARG_KEEP_OVLAN))
+               t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG,
+                                      V_RM_OVLAN(1), V_RM_OVLAN(0));
+}
+
 static void configure_pcie_ext_tag(struct adapter *adapter)
 {
        u16 v;
@@ -653,6 +852,7 @@ bye:
 
 static int adap_init0(struct adapter *adap)
 {
+       struct fw_caps_config_cmd caps_cmd;
        int ret = 0;
        u32 v, port_vec;
        enum dev_state state;
@@ -769,6 +969,35 @@ static int adap_init0(struct adapter *adap)
         V_FW_PARAMS_PARAM_Y(0) | \
         V_FW_PARAMS_PARAM_Z(0))
 
+       params[0] = FW_PARAM_PFVF(FILTER_START);
+       params[1] = FW_PARAM_PFVF(FILTER_END);
+       ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
+       if (ret < 0)
+               goto bye;
+       adap->tids.ftid_base = val[0];
+       adap->tids.nftids = val[1] - val[0] + 1;
+
+       /*
+        * Get device capabilities so we can determine what resources we need
+        * to manage.
+        */
+       memset(&caps_cmd, 0, sizeof(caps_cmd));
+       caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+                                    F_FW_CMD_REQUEST | F_FW_CMD_READ);
+       caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
+       ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
+                        &caps_cmd);
+       if (ret < 0)
+               goto bye;
+
+       /* query tid-related parameters */
+       params[0] = FW_PARAM_DEV(NTID);
+       ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+                             params, val);
+       if (ret < 0)
+               goto bye;
+       adap->tids.ntids = val[0];
+
        /* If we're running on newer firmware, let it know that we're
         * prepared to deal with encapsulated CPL messages.  Older
         * firmware won't understand this and we'll just get
@@ -833,6 +1062,7 @@ static int adap_init0(struct adapter *adap)
        t4_init_sge_params(adap);
        t4_init_tp_params(adap);
        configure_pcie_ext_tag(adap);
+       configure_vlan_types(adap);
 
        adap->params.drv_memwin = MEMWIN_NIC;
        adap->flags |= FW_OK;
@@ -886,6 +1116,18 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id)
                         pi->port_id, pi->mod_type);
 }
 
+inline bool force_linkup(struct adapter *adap)
+{
+       struct rte_pci_device *pdev = adap->pdev;
+
+       if (is_pf4(adap))
+               return false;   /* force_linkup not required for PF driver */
+       if (!cxgbe_get_devargs(pdev->device.devargs,
+                              CXGBE_DEVARG_FORCE_LINK_UP))
+               return false;
+       return true;
+}
+
 /**
  * link_start - enable a port
  * @dev: the port to enable
@@ -931,6 +1173,9 @@ int link_start(struct port_info *pi)
                ret = t4_enable_vi_params(adapter, adapter->mbox, pi->viid,
                                          true, true, false);
        }
+
+       if (ret == 0 && force_linkup(adapter))
+               pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
        return ret;
 }
 
@@ -1068,7 +1313,8 @@ int setup_rss(struct port_info *pi)
 static void enable_rx(struct adapter *adap, struct sge_rspq *q)
 {
        /* 0-increment GTS to start the timer and enable interrupts */
-       t4_write_reg(adap, MYPF_REG(A_SGE_PF_GTS),
+       t4_write_reg(adap, is_pf4(adap) ? MYPF_REG(A_SGE_PF_GTS) :
+                                         T4VF_SGE_BASE_ADDR + A_SGE_VF_GTS,
                     V_SEINTARM(q->intr_params) |
                     V_INGRESSQID(q->cntxt_id));
 }
@@ -1237,6 +1483,7 @@ void cxgbe_close(struct adapter *adapter)
        if (adapter->flags & FULL_INIT_DONE) {
                if (is_pf4(adapter))
                        t4_intr_disable(adapter);
+               tid_free(&adapter->tids);
                t4_sge_tx_monitor_stop(adapter);
                t4_free_sge_resources(adapter);
                for_each_port(adapter, i) {
@@ -1378,6 +1625,11 @@ allocate_mac:
                        err = -1;
                        goto out_free;
                }
+
+               if (i > 0) {
+                       /* First port will be notified by upper layer */
+                       rte_eth_dev_probing_finish(eth_dev);
+               }
        }
 
        if (adapter->flags & FW_OK) {
@@ -1394,6 +1646,12 @@ allocate_mac:
        print_adapter_info(adapter);
        print_port_info(adapter);
 
+       if (tid_init(&adapter->tids) < 0) {
+               /* Disable filtering support */
+               dev_warn(adapter, "could not allocate TID table, "
+                        "filter support disabled. Continuing\n");
+       }
+
        err = init_rss(adapter);
        if (err)
                goto out_free;