return 0;
}
+/**
+ * Apart from the 4-tuple IPv4/IPv6 and TCP/UDP information,
+ * there are only 40 bits available to store match fields.
+ * So, to save space, optimize the filter spec for some common
+ * known fields that hardware can parse against incoming
+ * packets automatically.
+ */
+static void
+cxgbe_tweak_filter_spec(struct adapter *adap,
+ struct ch_filter_specification *fs)
+{
+ /* Save the 16-bit ethertype field space by setting corresponding
+ * 1-bit flags in the filter spec for common known ethertypes.
+ * When hardware sees these flags, it automatically infers and
+ * matches incoming packets against the corresponding ethertype.
+ */
+ if (fs->mask.ethtype == 0xffff) {
+ switch (fs->val.ethtype) {
+ case RTE_ETHER_TYPE_IPV4:
+ if (adap->params.tp.ethertype_shift < 0) {
+ fs->type = FILTER_TYPE_IPV4;
+ fs->val.ethtype = 0;
+ fs->mask.ethtype = 0;
+ }
+ break;
+ case RTE_ETHER_TYPE_IPV6:
+ if (adap->params.tp.ethertype_shift < 0) {
+ fs->type = FILTER_TYPE_IPV6;
+ fs->val.ethtype = 0;
+ fs->mask.ethtype = 0;
+ }
+ break;
+ case RTE_ETHER_TYPE_VLAN:
+ if (adap->params.tp.ethertype_shift < 0 &&
+ adap->params.tp.vlan_shift >= 0) {
+ fs->val.ivlan_vld = 1;
+ fs->mask.ivlan_vld = 1;
+ fs->val.ethtype = 0;
+ fs->mask.ethtype = 0;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+}
+
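
To make the space saving concrete, here is a minimal standalone sketch of the same optimization. The struct, constants, and toy_* names below are simplified stand-ins invented for illustration; they are not the driver's real ch_filter_specification or TP definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the driver's types; the real code uses
     * ch_filter_specification and adapter->params.tp.ethertype_shift.
     */
    #define TOY_ETHTYPE_IPV4 0x0800
    #define TOY_ETHTYPE_IPV6 0x86dd

    enum { TOY_FILTER_IPV4, TOY_FILTER_IPV6 };

    struct toy_spec {
    	int type;              /* dedicated 1-bit IPv4/IPv6 flag */
    	uint16_t ethtype;      /* 16-bit ethertype match value   */
    	uint16_t ethtype_mask;
    };

    /* When the TCAM has no ethertype field compiled in (shift < 0),
     * fold an exact IPv4/IPv6 ethertype match into the 1-bit type
     * flag and free up the 16-bit field, as the patch does above.
     */
    static void toy_tweak_filter_spec(struct toy_spec *fs, int ethertype_shift)
    {
    	if (fs->ethtype_mask != 0xffff || ethertype_shift >= 0)
    		return;

    	switch (fs->ethtype) {
    	case TOY_ETHTYPE_IPV4:
    		fs->type = TOY_FILTER_IPV4;
    		break;
    	case TOY_ETHTYPE_IPV6:
    		fs->type = TOY_FILTER_IPV6;
    		break;
    	default:
    		return;
    	}
    	fs->ethtype = 0;
    	fs->ethtype_mask = 0;
    }

    int main(void)
    {
    	struct toy_spec fs = {
    		.ethtype = TOY_ETHTYPE_IPV4,
    		.ethtype_mask = 0xffff,
    	};

    	toy_tweak_filter_spec(&fs, -1);
    	printf("type=%d ethtype=0x%04x mask=0x%04x\n",
    	       fs.type, fs.ethtype, fs.ethtype_mask);
    	return 0;
    }
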
static void
cxgbe_fill_filter_region(struct adapter *adap,
struct ch_filter_specification *fs)
ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
if (tp->macmatch_shift >= 0)
ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
+ if (tp->vlan_shift >= 0 && fs->mask.ivlan_vld)
+ ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ivlan) <<
+ tp->vlan_shift;
if (ntuple_mask != hash_filter_mask)
return;
/* If user has not given any mask, then use chelsio supported mask. */
mask = umask ? umask : (const struct rte_flow_item_eth *)dmask;
+ if (!spec)
+ return 0;
+
+ /* Chelsio hardware supports matching on only one ethertype
+ * (i.e. either the outer or inner ethertype, but not both). If
+ * we have already encountered a VLAN item, then ensure that the
+ * outer ethertype is VLAN (0x8100) and don't overwrite the inner
+ * ethertype stored during VLAN item parsing. Note that if the
+ * 'ivlan_vld' bit is set in the Chelsio filter spec, then the
+ * hardware automatically matches only packets whose outer
+ * ethertype is VLAN (0x8100).
+ */
+ if (fs->mask.ivlan_vld &&
+ be16_to_cpu(spec->type) != RTE_ETHER_TYPE_VLAN)
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Already encountered VLAN item,"
+ " but outer ethertype is not 0x8100");
+
/* we don't support SRC_MAC filtering*/
if (!rte_is_zero_ether_addr(&mask->src))
return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
CXGBE_FILL_FS(idx, 0x1ff, macidx);
}
- CXGBE_FILL_FS(be16_to_cpu(spec->type),
- be16_to_cpu(mask->type), ethtype);
+ /* Only set the outer ethertype if we haven't encountered a VLAN
+ * item yet. Otherwise, the inner ethertype set by the VLAN item
+ * would get overwritten.
+ */
+ if (!fs->mask.ivlan_vld)
+ CXGBE_FILL_FS(be16_to_cpu(spec->type),
+ be16_to_cpu(mask->type), ethtype);
return 0;
}
return 0;
}
+static int
+ch_rte_parsetype_vlan(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_vlan *spec = item->spec;
+ const struct rte_flow_item_vlan *umask = item->mask;
+ const struct rte_flow_item_vlan *mask;
+
+ /* If user has not given any mask, then use chelsio supported mask. */
+ mask = umask ? umask : (const struct rte_flow_item_vlan *)dmask;
+
+ CXGBE_FILL_FS(1, 1, ivlan_vld);
+ if (!spec)
+ return 0; /* Wildcard, match all VLAN */
+
+ /* Chelsio hardware supports matching on only one ethertype
+ * (i.e. either the outer or inner ethertype, but not both).
+ * If the outer ethertype is already set and is not VLAN (0x8100),
+ * then don't proceed further. Otherwise, reset the outer
+ * ethertype, so that it can be replaced by the inner ethertype.
+ * Note that the hardware will automatically match on outer
+ * ethertype 0x8100 if the 'ivlan_vld' bit is set in the Chelsio
+ * filter spec.
+ */
+ if (fs->mask.ethtype) {
+ if (fs->val.ethtype != RTE_ETHER_TYPE_VLAN)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Outer ethertype not 0x8100");
+
+ fs->val.ethtype = 0;
+ fs->mask.ethtype = 0;
+ }
+
+ CXGBE_FILL_FS(be16_to_cpu(spec->tci), be16_to_cpu(mask->tci), ivlan);
+ if (spec->inner_type)
+ CXGBE_FILL_FS(be16_to_cpu(spec->inner_type),
+ be16_to_cpu(mask->inner_type), ethtype);
+
+ return 0;
+}
+
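
The TCI handled above packs three 802.1Q fields into 16 bits; the parser matches the whole field through CXGBE_FILL_FS after byte-swapping. A self-contained demo of the layout, using standard ntohs()/htons() in place of the driver's be16_to_cpu():

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    int main(void)
    {
    	/* Example TCI in network byte order: PCP=5, DEI=0, VID=100 */
    	uint16_t tci_be = htons((uint16_t)((5 << 13) | 100));
    	uint16_t tci = ntohs(tci_be);

    	printf("pcp=%u dei=%u vid=%u\n",
    	       (tci >> 13) & 0x7,   /* top 3 bits: priority    */
    	       (tci >> 12) & 0x1,   /* next bit: drop eligible */
    	       tci & 0xfff);        /* low 12 bits: VLAN ID    */
    	return 0;
    }
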
static int
ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
struct ch_filter_specification *fs,
return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
item, "ttl/tos are not supported");
+ if (fs->mask.ethtype &&
+ (fs->val.ethtype != RTE_ETHER_TYPE_VLAN &&
+ fs->val.ethtype != RTE_ETHER_TYPE_IPV4))
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Couldn't find IPv4 ethertype");
fs->type = FILTER_TYPE_IPV4;
- CXGBE_FILL_FS(RTE_ETHER_TYPE_IPV4, 0xffff, ethtype);
if (!val)
return 0; /* ipv4 wild card */
item,
"tc/flow/hop are not supported");
+ if (fs->mask.ethtype &&
+ (fs->val.ethtype != RTE_ETHER_TYPE_VLAN &&
+ fs->val.ethtype != RTE_ETHER_TYPE_IPV6))
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Couldn't find IPv6 ethertype");
fs->type = FILTER_TYPE_IPV6;
- CXGBE_FILL_FS(RTE_ETHER_TYPE_IPV6, 0xffff, ethtype);
if (!val)
return 0; /* ipv6 wild card */
{
struct adapter *adap = ethdev2adap(f->dev);
struct ch_filter_specification fs = f->fs;
+ u8 nentries;
if (fidx >= adap->tids.nftids) {
dev_err(adap, "invalid flow index %d.\n", fidx);
return -EINVAL;
}
- if (!cxgbe_is_filter_set(&adap->tids, fidx, fs.type)) {
+
+ nentries = cxgbe_filter_slots(adap, fs.type);
+ if (!cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
return -EINVAL;
}
cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
struct adapter *adap, unsigned int fidx)
{
- if (cxgbe_is_filter_set(&adap->tids, fidx, fs->type)) {
+ u8 nentries;
+
+ nentries = cxgbe_filter_slots(adap, fs->type);
+ if (cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
dev_err(adap, "filter index: %d is busy.\n", fidx);
return -EBUSY;
}
+
if (fidx >= adap->tids.nftids) {
dev_err(adap, "filter index (%u) >= max(%u)\n",
fidx, adap->tids.nftids);
/* For tcam get the next available slot, if default value specified */
if (flow->fidx == FILTER_ID_MAX) {
+ u8 nentries;
int idx;
- idx = cxgbe_alloc_ftid(adap, fs->type);
+ nentries = cxgbe_filter_slots(adap, fs->type);
+ idx = cxgbe_alloc_ftid(adap, nentries);
if (idx < 0) {
dev_err(adap, "unable to get a filter index in tcam\n");
return -ENOMEM;
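
The switch from passing fs->type to passing a slot count matters because wide (IPv6) filters occupy several consecutive TCAM entries. The following is a simplified, hypothetical sketch of what an allocator like cxgbe_alloc_ftid() has to do once it receives a slot count; it is illustrative only, not the driver's implementation, which scans its ftid table under a lock and may also align the start index:

    #include <stdio.h>

    /* Find the first run of 'nentries' consecutive free slots in a
     * toy occupancy table; return -1 if no such run exists.
     */
    static int toy_alloc_ftid(const unsigned char *busy, int nslots,
    			  int nentries)
    {
    	int i, j;

    	for (i = 0; i + nentries <= nslots; i++) {
    		for (j = 0; j < nentries; j++)
    			if (busy[i + j])
    				break;
    		if (j == nentries)
    			return i;
    	}
    	return -1;
    }

    int main(void)
    {
    	unsigned char busy[8] = { 1, 0, 0, 1, 0, 0, 0, 0 };

    	printf("1 slot  -> idx %d\n", toy_alloc_ftid(busy, 8, 1)); /* 1 */
    	printf("4 slots -> idx %d\n", toy_alloc_ftid(busy, 8, 4)); /* 4 */
    	return 0;
    }
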
struct rte_flow_error *e)
{
const struct rte_flow_action_of_set_vlan_vid *vlanid;
+ const struct rte_flow_action_of_set_vlan_pcp *vlanpcp;
const struct rte_flow_action_of_push_vlan *pushvlan;
const struct rte_flow_action_set_ipv4 *ipv4;
const struct rte_flow_action_set_ipv6 *ipv6;
const struct rte_flow_action_set_tp *tp_port;
const struct rte_flow_action_phy_port *port;
int item_index;
+ u16 tmp_vlan;
switch (a->type) {
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
vlanid = (const struct rte_flow_action_of_set_vlan_vid *)
a->conf;
- fs->newvlan = VLAN_REWRITE;
- fs->vlan = vlanid->vlan_vid;
+ /* If explicitly asked to push a new VLAN header,
+ * then don't set the rewrite mode. Otherwise, incoming
+ * VLAN packets will have their VLAN fields rewritten,
+ * instead of getting an additional outer VLAN header
+ * added.
+ */
+ if (fs->newvlan != VLAN_INSERT)
+ fs->newvlan = VLAN_REWRITE;
+ tmp_vlan = fs->vlan & 0xe000;
+ fs->vlan = (be16_to_cpu(vlanid->vlan_vid) & 0xfff) | tmp_vlan;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
+ vlanpcp = (const struct rte_flow_action_of_set_vlan_pcp *)
+ a->conf;
+ /* If explicitly asked to push a new VLAN header,
+ * then don't set the rewrite mode. Otherwise, incoming
+ * VLAN packets will have their VLAN fields rewritten,
+ * instead of getting an additional outer VLAN header
+ * added.
+ */
+ if (fs->newvlan != VLAN_INSERT)
+ fs->newvlan = VLAN_REWRITE;
+ tmp_vlan = fs->vlan & 0xfff;
+ fs->vlan = (vlanpcp->vlan_pcp << 13) | tmp_vlan;
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
pushvlan = (const struct rte_flow_action_of_push_vlan *)
a->conf;
- if (pushvlan->ethertype != RTE_ETHER_TYPE_VLAN)
+ if (be16_to_cpu(pushvlan->ethertype) != RTE_ETHER_TYPE_VLAN)
return rte_flow_error_set(e, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, a,
"only ethertype 0x8100 "
{
struct ch_filter_specification *fs = &flow->fs;
uint8_t nmode = 0, nat_ipv4 = 0, nat_ipv6 = 0;
+ uint8_t vlan_set_vid = 0, vlan_set_pcp = 0;
const struct rte_flow_action_queue *q;
const struct rte_flow_action *a;
char abit = 0;
fs->hitcnts = 1;
break;
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ vlan_set_vid++;
+ goto action_switch;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
+ vlan_set_pcp++;
+ goto action_switch;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
case RTE_FLOW_ACTION_TYPE_PHY_PORT:
}
}
+ if (fs->newvlan == VLAN_REWRITE && (!vlan_set_vid || !vlan_set_pcp))
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "Both OF_SET_VLAN_VID and "
+ "OF_SET_VLAN_PCP must be specified");
+
if (ch_rte_parse_nat(nmode, fs))
return rte_flow_error_set(e, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, a,
}
},
+ [RTE_FLOW_ITEM_TYPE_VLAN] = {
+ .fptr = ch_rte_parsetype_vlan,
+ .dmask = &(const struct rte_flow_item_vlan){
+ .tci = 0xffff,
+ .inner_type = 0xffff,
+ }
+ },
[RTE_FLOW_ITEM_TYPE_IPV4] = {
.fptr = ch_rte_parsetype_ipv4,
.dmask = &rte_flow_item_ipv4_mask,
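
With the VLAN entry added to the parse table, an application can express rules such as "steer VLAN 100 IPv4 traffic to queue 1" through the public rte_flow API. A hedged usage sketch follows; the helper name create_vlan_rule and the port/queue numbers are invented for illustration, and error handling is trimmed:

    #include <rte_byteorder.h>
    #include <rte_flow.h>

    /* Sketch: match ETH / VLAN (VID 100) / IPv4 and steer to queue 1. */
    static struct rte_flow *create_vlan_rule(uint16_t port_id)
    {
    	struct rte_flow_attr attr = { .ingress = 1 };
    	struct rte_flow_item_vlan vlan_spec = {
    		.tci = RTE_BE16(100),        /* VID=100, PCP/DEI zero   */
    	};
    	struct rte_flow_item_vlan vlan_mask = {
    		.tci = RTE_BE16(0x0fff),     /* match the VID bits only */
    	};
    	struct rte_flow_item pattern[] = {
    		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
    		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
    		  .spec = &vlan_spec, .mask = &vlan_mask },
    		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
    		{ .type = RTE_FLOW_ITEM_TYPE_END },
    	};
    	struct rte_flow_action_queue queue = { .index = 1 };
    	struct rte_flow_action actions[] = {
    		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
    		{ .type = RTE_FLOW_ACTION_TYPE_END },
    	};
    	struct rte_flow_error err;

    	return rte_flow_create(port_id, &attr, pattern, actions, &err);
    }
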
}
cxgbe_fill_filter_region(adap, &flow->fs);
+ cxgbe_tweak_filter_spec(adap, &flow->fs);
return 0;
}
const struct rte_flow_action action[],
struct rte_flow_error *e)
{
+ struct adapter *adap = ethdev2adap(dev);
struct rte_flow *flow;
int ret;
return NULL;
}
+ t4_os_lock(&adap->flow_lock);
/* go, interact with cxgbe_filter */
ret = __cxgbe_flow_create(dev, flow);
+ t4_os_unlock(&adap->flow_lock);
if (ret) {
rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "Unable to create flow rule");
cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
struct rte_flow_error *e)
{
+ struct adapter *adap = ethdev2adap(dev);
int ret;
+ t4_os_lock(&adap->flow_lock);
ret = __cxgbe_flow_destroy(dev, flow);
+ t4_os_unlock(&adap->flow_lock);
if (ret)
return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
flow, "error destroying filter.");
" enabled during filter creation");
c = (struct rte_flow_query_count *)data;
+
+ t4_os_lock(&adap->flow_lock);
ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
- if (ret)
- return rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
- f, "cxgbe pmd failed to"
- " perform query");
+ if (ret) {
+ rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
+ f, "cxgbe pmd failed to perform query");
+ goto out;
+ }
/* Query was successful */
c->bytes_set = 1;
if (c->reset)
cxgbe_clear_filter_count(adap, flow->fidx, f->fs.cap, true);
- return 0; /* success / partial_success */
+out:
+ t4_os_unlock(&adap->flow_lock);
+ return ret;
}
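
For context, the locked path above is what ultimately services a counter query from the application side. A minimal caller sketch against the public API; the helper name read_flow_counters is invented, and the rule is assumed to have been created with a COUNT action:

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_flow.h>

    /* Read hit/byte counters for a flow created with a COUNT action. */
    static int read_flow_counters(uint16_t port_id, struct rte_flow *flow)
    {
    	struct rte_flow_query_count count = { .reset = 1 };
    	struct rte_flow_action action = {
    		.type = RTE_FLOW_ACTION_TYPE_COUNT,
    	};
    	struct rte_flow_error err;
    	int ret;

    	ret = rte_flow_query(port_id, flow, &action, &count, &err);
    	if (ret)
    		return ret;

    	if (count.hits_set)
    		printf("hits:  %" PRIu64 "\n", count.hits);
    	if (count.bytes_set)
    		printf("bytes: %" PRIu64 "\n", count.bytes);
    	return 0;
    }
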
static int
struct adapter *adap = ethdev2adap(dev);
struct rte_flow *flow;
unsigned int fidx;
- int ret;
+ int ret = 0;
flow = t4_os_alloc(sizeof(struct rte_flow));
if (!flow)
"validation failed. Check f/w config file.");
}
+ t4_os_lock(&adap->flow_lock);
if (cxgbe_get_fidx(flow, &fidx)) {
- t4_os_free(flow);
- return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "no memory in tcam.");
+ ret = rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "no memory in tcam.");
+ goto out;
}
if (cxgbe_verify_fidx(flow, fidx, 0)) {
- t4_os_free(flow);
- return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "validation failed");
+ ret = rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "validation failed");
+ goto out;
}
+out:
+ t4_os_unlock(&adap->flow_lock);
t4_os_free(flow);
- return 0;
+ return ret;
}
/*
* == 1 filter not active / not found
*/
static int
-cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev,
- struct rte_flow_error *e)
+cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev)
{
if (f && (f->valid || f->pending) &&
f->dev == dev && /* Only if user has asked for this port */
f->private) /* We (rte_flow) created this filter */
- return cxgbe_flow_destroy(dev, (struct rte_flow *)f->private,
- e);
+ return __cxgbe_flow_destroy(dev, (struct rte_flow *)f->private);
return 1;
}
unsigned int i;
int ret = 0;
+ t4_os_lock(&adap->flow_lock);
if (adap->tids.ftid_tab) {
struct filter_entry *f = &adap->tids.ftid_tab[0];
for (i = 0; i < adap->tids.nftids; i++, f++) {
- ret = cxgbe_check_n_destroy(f, dev, e);
- if (ret < 0)
+ ret = cxgbe_check_n_destroy(f, dev);
+ if (ret < 0) {
+ rte_flow_error_set(e, ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ f->private,
+ "error destroying TCAM "
+ "filter.");
goto out;
+ }
}
}
for (i = adap->tids.hash_base; i <= adap->tids.ntids; i++) {
f = (struct filter_entry *)adap->tids.tid_tab[i];
- ret = cxgbe_check_n_destroy(f, dev, e);
- if (ret < 0)
+ ret = cxgbe_check_n_destroy(f, dev);
+ if (ret < 0) {
+ rte_flow_error_set(e, ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ f->private,
+ "error destroying HASH "
+ "filter.");
goto out;
+ }
}
}
out:
+ t4_os_unlock(&adap->flow_lock);
return ret >= 0 ? 0 : ret;
}
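
Note the design choice in the flush path above: it already holds flow_lock, so cxgbe_check_n_destroy() was changed to call __cxgbe_flow_destroy() directly; going through cxgbe_flow_destroy() would attempt to take flow_lock a second time. From the application's point of view this whole path sits behind a single public call; a trivial usage sketch (the helper name remove_all_rules is invented for illustration):

    #include <rte_flow.h>

    /* Remove every rte_flow rule this application created on the port. */
    static int remove_all_rules(uint16_t port_id)
    {
    	struct rte_flow_error err;

    	return rte_flow_flush(port_id, &err);
    }
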