net/cxgbe: support redirecting packets to an egress port
author     Shagun Agrawal <shaguna@chelsio.com>
           Fri, 29 Jun 2018 18:12:24 +0000 (23:42 +0530)
committer  Ferruh Yigit <ferruh.yigit@intel.com>
           Wed, 4 Jul 2018 20:20:41 +0000 (22:20 +0200)
Add an action to redirect matched packets to the specified egress
physical port without sending them to the host.
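
For reference, a minimal rte_flow usage sketch (not part of this patch)
showing how an application could exercise the new PHY_PORT action; the
function name switch_ipv4_to_phy_port(), the IPv4-only pattern, and the
port indices are illustrative assumptions, not mandated by the driver:

  #include <stdint.h>
  #include <rte_flow.h>

  /* Example only: steer all IPv4 packets received on port_id back out
   * of physical port egress_phy_port without delivering them to the
   * host.
   */
  static struct rte_flow *
  switch_ipv4_to_phy_port(uint16_t port_id, uint32_t egress_phy_port,
                          struct rte_flow_error *err)
  {
          struct rte_flow_attr attr = { .ingress = 1 };
          struct rte_flow_item pattern[] = {
                  { .type = RTE_FLOW_ITEM_TYPE_ETH },
                  { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                  { .type = RTE_FLOW_ITEM_TYPE_END },
          };
          /* PHY_PORT action: matched packets are looped back out the
           * given physical port by the hardware.
           */
          struct rte_flow_action_phy_port phy = {
                  .index = egress_phy_port,
          };
          struct rte_flow_action actions[] = {
                  { .type = RTE_FLOW_ACTION_TYPE_PHY_PORT, .conf = &phy },
                  { .type = RTE_FLOW_ACTION_TYPE_END },
          };

          return rte_flow_create(port_id, &attr, pattern, actions, err);
  }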

Signed-off-by: Shagun Agrawal <shaguna@chelsio.com>
Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
drivers/net/cxgbe/base/t4_msg.h
drivers/net/cxgbe/cxgbe_filter.c
drivers/net/cxgbe/cxgbe_filter.h
drivers/net/cxgbe/cxgbe_flow.c

diff --git a/drivers/net/cxgbe/base/t4_msg.h b/drivers/net/cxgbe/base/t4_msg.h
index 7f4c98f..5d433c9 100644
--- a/drivers/net/cxgbe/base/t4_msg.h
+++ b/drivers/net/cxgbe/base/t4_msg.h
@@ -113,6 +113,9 @@ struct work_request_hdr {
 #define G_COOKIE(x) (((x) >> S_COOKIE) & M_COOKIE)
 
 /* option 0 fields */
+#define S_TX_CHAN    2
+#define V_TX_CHAN(x) ((x) << S_TX_CHAN)
+
 #define S_DELACK    5
 #define V_DELACK(x) ((x) << S_DELACK)
 
@@ -145,6 +148,9 @@ struct work_request_hdr {
 #define V_RX_CHANNEL(x) ((x) << S_RX_CHANNEL)
 #define F_RX_CHANNEL    V_RX_CHANNEL(1U)
 
+#define S_CCTRL_ECN    27
+#define V_CCTRL_ECN(x) ((x) << S_CCTRL_ECN)
+
 #define S_T5_OPT_2_VALID    31
 #define V_T5_OPT_2_VALID(x) ((x) << S_T5_OPT_2_VALID)
 #define F_T5_OPT_2_VALID    V_T5_OPT_2_VALID(1U)
diff --git a/drivers/net/cxgbe/cxgbe_filter.c b/drivers/net/cxgbe/cxgbe_filter.c
index 8c5890e..7f0d380 100644
--- a/drivers/net/cxgbe/cxgbe_filter.c
+++ b/drivers/net/cxgbe/cxgbe_filter.c
@@ -71,6 +71,15 @@ int validate_filter(struct adapter *adapter, struct ch_filter_specification *fs)
 #undef S
 #undef U
 
+       /*
+        * If the user is requesting that the filter action loop
+        * matching packets back out one of our ports, make sure that
+        * the egress port is in range.
+        */
+       if (fs->action == FILTER_SWITCH &&
+           fs->eport >= adapter->params.nports)
+               return -ERANGE;
+
        /*
         * Don't allow various trivially obvious bogus out-of-range
         * values ...
@@ -419,6 +428,7 @@ static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
        req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
                                V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
                                           << 1) |
+                               V_TX_CHAN(f->fs.eport) |
                                V_ULP_MODE(ULP_MODE_NONE) |
                                F_TCAM_BYPASS | F_NON_OFFLOAD);
        req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
@@ -427,7 +437,8 @@ static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
                            F_T5_OPT_2_VALID |
                            F_RX_CHANNEL |
                            V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
-                                        (f->fs.dirsteer << 1)));
+                                        (f->fs.dirsteer << 1)) |
+                           V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
 }
 
 /**
@@ -460,6 +471,7 @@ static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
        req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
                                V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
                                           << 1) |
+                               V_TX_CHAN(f->fs.eport) |
                                V_ULP_MODE(ULP_MODE_NONE) |
                                F_TCAM_BYPASS | F_NON_OFFLOAD);
        req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
@@ -468,7 +480,8 @@ static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
                            F_T5_OPT_2_VALID |
                            F_RX_CHANNEL |
                            V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
-                                        (f->fs.dirsteer << 1)));
+                                        (f->fs.dirsteer << 1)) |
+                           V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
 }
 
 /**
@@ -666,7 +679,9 @@ int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
        fwr->del_filter_to_l2tix =
                cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
                            V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
+                           V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
                            V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
+                           V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
                            V_FW_FILTER_WR_PRIO(f->fs.prio));
        fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
        fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
diff --git a/drivers/net/cxgbe/cxgbe_filter.h b/drivers/net/cxgbe/cxgbe_filter.h
index fac1f75..af8fa75 100644
--- a/drivers/net/cxgbe/cxgbe_filter.h
+++ b/drivers/net/cxgbe/cxgbe_filter.h
@@ -98,6 +98,8 @@ struct ch_filter_specification {
        uint32_t dirsteer:1;    /* 0 => RSS, 1 => steer to iq */
        uint32_t iq:10;         /* ingress queue */
 
+       uint32_t eport:2;       /* egress port to switch packet out */
+
        /* Filter rule value/mask pairs. */
        struct ch_filter_tuple val;
        struct ch_filter_tuple mask;
@@ -105,7 +107,8 @@ struct ch_filter_specification {
 
 enum {
        FILTER_PASS = 0,        /* default */
-       FILTER_DROP
+       FILTER_DROP,
+       FILTER_SWITCH
 };
 
 enum filter_type {
diff --git a/drivers/net/cxgbe/cxgbe_flow.c b/drivers/net/cxgbe/cxgbe_flow.c
index 823bc72..01c945f 100644
--- a/drivers/net/cxgbe/cxgbe_flow.c
+++ b/drivers/net/cxgbe/cxgbe_flow.c
@@ -326,6 +326,28 @@ static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
        return 0;
 }
 
+static int
+ch_rte_parse_atype_switch(const struct rte_flow_action *a,
+                         struct ch_filter_specification *fs,
+                         struct rte_flow_error *e)
+{
+       const struct rte_flow_action_phy_port *port;
+
+       switch (a->type) {
+       case RTE_FLOW_ACTION_TYPE_PHY_PORT:
+               port = (const struct rte_flow_action_phy_port *)a->conf;
+               fs->eport = port->index;
+               break;
+       default:
+               /* We are not supposed to come here */
+               return rte_flow_error_set(e, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ACTION, a,
+                                         "Action not supported");
+       }
+
+       return 0;
+}
+
 static int
 cxgbe_rtef_parse_actions(struct rte_flow *flow,
                         const struct rte_flow_action action[],
@@ -335,6 +357,7 @@ cxgbe_rtef_parse_actions(struct rte_flow *flow,
        const struct rte_flow_action_queue *q;
        const struct rte_flow_action *a;
        char abit = 0;
+       int ret;
 
        for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
                switch (a->type) {
@@ -368,6 +391,19 @@ cxgbe_rtef_parse_actions(struct rte_flow *flow,
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        fs->hitcnts = 1;
                        break;
+               case RTE_FLOW_ACTION_TYPE_PHY_PORT:
+                       /* We allow multiple switch actions, but switch is
+                        * not compatible with either queue or drop
+                        */
+                       if (abit++ && fs->action != FILTER_SWITCH)
+                               return rte_flow_error_set(e, EINVAL,
+                                               RTE_FLOW_ERROR_TYPE_ACTION, a,
+                                               "overlapping action specified");
+                       ret = ch_rte_parse_atype_switch(a, fs, e);
+                       if (ret)
+                               return ret;
+                       fs->action = FILTER_SWITCH;
+                       break;
                default:
                        /* Not supported action : return error */
                        return rte_flow_error_set(e, ENOTSUP,