struct ibv_flow_spec_tcp_udp *udp;
const char *msg;
- if (!mask ||
+ if (mask &&
((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
(uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
msg = "mlx4 does not support matching partial UDP fields";
struct ibv_flow_spec_tcp_udp *tcp;
const char *msg;
- if (!mask ||
+ if (mask &&
((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
(uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
msg = "mlx4 does not support matching partial TCP fields";
if (flow->rss)
break;
queue = action->conf;
+ if (queue->index >= priv->dev->data->nb_rx_queues) {
+ msg = "queue target index beyond number of"
+ " configured Rx queues";
+ goto exit_action_not_supported;
+ }
flow->rss = mlx4_rss_get
(priv, 0, mlx4_rss_hash_key_default, 1,
&queue->index);
ETH_RSS_NONFRAG_IPV6_TCP),
};
/* Sanity checks. */
+ for (i = 0; i < rss->num; ++i)
+ if (rss->queue[i] >=
+ priv->dev->data->nb_rx_queues)
+ break;
+ if (i != rss->num) {
+ msg = "queue target index beyond number of"
+ " configured Rx queues";
+ goto exit_action_not_supported;
+ }
if (!rte_is_power_of_2(rss->num)) {
msg = "for RSS, mlx4 requires the number of"
" queues to be a power of two";
.type = RTE_FLOW_ITEM_TYPE_END,
},
};
+ /*
+ * Round number of queues down to their previous power of 2 to
+ * comply with RSS context limitations. Extra queues silently do not
+ * get RSS by default.
+ */
+ uint32_t queues =
+ rte_align32pow2(priv->dev->data->nb_rx_queues + 1) >> 1;
+ alignas(struct rte_flow_action_rss) uint8_t rss_conf_data
+ [offsetof(struct rte_flow_action_rss, queue) +
+ sizeof(((struct rte_flow_action_rss *)0)->queue[0]) * queues];
+ struct rte_flow_action_rss *rss_conf = (void *)rss_conf_data;
struct rte_flow_action actions[] = {
{
- .type = RTE_FLOW_ACTION_TYPE_QUEUE,
- .conf = &(struct rte_flow_action_queue){
- .index = 0,
- },
+ .type = RTE_FLOW_ACTION_TYPE_RSS,
+ .conf = rss_conf,
},
{
.type = RTE_FLOW_ACTION_TYPE_END,
unsigned int i;
int err = 0;
+ /* Nothing to be done if there are no Rx queues. */
+ if (!queues)
+ goto error;
+ /* Prepare default RSS configuration. */
+ *rss_conf = (struct rte_flow_action_rss){
+ .rss_conf = NULL, /* Rely on default fallback settings. */
+ .num = queues,
+ };
+ for (i = 0; i != queues; ++i)
+ rss_conf->queue[i] = i;
/*
* Set up VLAN item if filtering is enabled and at least one VLAN
* filter is configured.
assert(flow->ibv_attr->type == IBV_FLOW_ATTR_NORMAL);
assert(flow->ibv_attr->num_of_specs == 1);
assert(eth->type == IBV_FLOW_SPEC_ETH);
+ assert(flow->rss);
if (rule_vlan &&
(eth->val.vlan_tag != *rule_vlan ||
eth->mask.vlan_tag != RTE_BE16(0x0fff)))
eth->val.src_mac[j] != UINT8_C(0x00) ||
eth->mask.src_mac[j] != UINT8_C(0x00))
break;
- if (j == sizeof(mac->addr_bytes))
- break;
+ if (j != sizeof(mac->addr_bytes))
+ continue;
+ if (flow->rss->queues != queues ||
+ memcmp(flow->rss->queue_id, rss_conf->queue,
+ queues * sizeof(flow->rss->queue_id[0])))
+ continue;
+ break;
}
if (!flow || !flow->internal) {
/* Not found, create a new flow rule. */
break;
}
}
+ if (flow && flow->internal) {
+ assert(flow->rss);
+ if (flow->rss->queues != queues ||
+ memcmp(flow->rss->queue_id, rss_conf->queue,
+ queues * sizeof(flow->rss->queue_id[0])))
+ flow = NULL;
+ }
if (!flow || !flow->internal) {
/* Not found, create a new flow rule. */
if (priv->dev->data->promiscuous) {
return ret;
}
/* Toggle the remaining flow rules . */
- for (flow = LIST_FIRST(&priv->flows);
- flow;
- flow = LIST_NEXT(flow, next)) {
+ LIST_FOREACH(flow, &priv->flows, next) {
ret = mlx4_flow_toggle(priv, flow, priv->started, error);
if (ret)
return ret;