#define G_COOKIE(x) (((x) >> S_COOKIE) & M_COOKIE)
/* option 0 fields */
+#define S_TX_CHAN 2
+#define V_TX_CHAN(x) ((x) << S_TX_CHAN)
+
#define S_DELACK 5
#define V_DELACK(x) ((x) << S_DELACK)
#define V_RX_CHANNEL(x) ((x) << S_RX_CHANNEL)
#define F_RX_CHANNEL V_RX_CHANNEL(1U)
+#define S_CCTRL_ECN 27
+#define V_CCTRL_ECN(x) ((x) << S_CCTRL_ECN)
+
#define S_T5_OPT_2_VALID 31
#define V_T5_OPT_2_VALID(x) ((x) << S_T5_OPT_2_VALID)
#define F_T5_OPT_2_VALID V_T5_OPT_2_VALID(1U)
#undef S
#undef U
+ /*
+ * If the user is requesting that the filter action loop
+ * matching packets back out one of our ports, make sure that
+ * the egress port is in range.
+ */
+ if (fs->action == FILTER_SWITCH &&
+ fs->eport >= adapter->params.nports)
+ return -ERANGE;
+
/*
* Don't allow various trivially obvious bogus out-of-range
* values ...
req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
<< 1) |
+ V_TX_CHAN(f->fs.eport) |
V_ULP_MODE(ULP_MODE_NONE) |
F_TCAM_BYPASS | F_NON_OFFLOAD);
req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
F_T5_OPT_2_VALID |
F_RX_CHANNEL |
V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
- (f->fs.dirsteer << 1)));
+ (f->fs.dirsteer << 1)) |
+ V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}
/**
req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
<< 1) |
+ V_TX_CHAN(f->fs.eport) |
V_ULP_MODE(ULP_MODE_NONE) |
F_TCAM_BYPASS | F_NON_OFFLOAD);
req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
F_T5_OPT_2_VALID |
F_RX_CHANNEL |
V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
- (f->fs.dirsteer << 1)));
+ (f->fs.dirsteer << 1)) |
+ V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}
/**
fwr->del_filter_to_l2tix =
cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
+ V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
+ V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
V_FW_FILTER_WR_PRIO(f->fs.prio));
fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
uint32_t dirsteer:1; /* 0 => RSS, 1 => steer to iq */
uint32_t iq:10; /* ingress queue */
+ uint32_t eport:2; /* egress port to switch packet out */
+
/* Filter rule value/mask pairs. */
struct ch_filter_tuple val;
struct ch_filter_tuple mask;
enum {
 FILTER_PASS = 0, /* default */
- FILTER_DROP
+ FILTER_DROP,
+ FILTER_SWITCH /* loop matched packets back out an egress port */
};
enum filter_type {
return 0;
}
+/*
+ * Parse a "switch" (loopback) flow action and record the requested
+ * egress port in the filter specification.
+ *
+ * Only RTE_FLOW_ACTION_TYPE_PHY_PORT is handled; any other action type
+ * fails with -EINVAL via rte_flow_error_set().  Note the port index is
+ * not range-checked here — the validation path rejects fs->eport >=
+ * adapter->params.nports separately.
+ */
+static int
+ch_rte_parse_atype_switch(const struct rte_flow_action *a,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_action_phy_port *port;
+
+ switch (a->type) {
+ case RTE_FLOW_ACTION_TYPE_PHY_PORT:
+ port = (const struct rte_flow_action_phy_port *)a->conf;
+ fs->eport = port->index;
+ break;
+ default:
+ /* We are not supposed to come here */
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "Action not supported");
+ }
+
+ return 0;
+}
+
static int
cxgbe_rtef_parse_actions(struct rte_flow *flow,
const struct rte_flow_action action[],
const struct rte_flow_action_queue *q;
const struct rte_flow_action *a;
char abit = 0;
+ int ret;
for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
switch (a->type) {
case RTE_FLOW_ACTION_TYPE_COUNT:
fs->hitcnts = 1;
break;
+ case RTE_FLOW_ACTION_TYPE_PHY_PORT:
+ /* We allow multiple switch actions, but switch is
+ * not compatible with either queue or drop
+ */
+ if (abit++ && fs->action != FILTER_SWITCH)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "overlapping action specified");
+ ret = ch_rte_parse_atype_switch(a, fs, e);
+ if (ret)
+ return ret;
+ fs->action = FILTER_SWITCH;
+ break;
default:
/* Not supported action : return error */
return rte_flow_error_set(e, ENOTSUP,