struct ibv_flow_spec_tcp_udp *udp;
const char *msg;
- if (!mask ||
+ if (mask &&
((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
(uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
msg = "mlx4 does not support matching partial UDP fields";
struct ibv_flow_spec_tcp_udp *tcp;
const char *msg;
- if (!mask ||
+ if (mask &&
((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
(uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
msg = "mlx4 does not support matching partial TCP fields";
.type = RTE_FLOW_ITEM_TYPE_END,
},
};
+ /*
+ * Round number of queues down to their previous power of 2 to
+ * comply with RSS context limitations. Extra queues silently do not
+ * get RSS by default.
+ */
+ uint32_t queues =
+ rte_align32pow2(priv->dev->data->nb_rx_queues + 1) >> 1;
+ alignas(struct rte_flow_action_rss) uint8_t rss_conf_data
+ [offsetof(struct rte_flow_action_rss, queue) +
+ sizeof(((struct rte_flow_action_rss *)0)->queue[0]) * queues];
+ struct rte_flow_action_rss *rss_conf = (void *)rss_conf_data;
struct rte_flow_action actions[] = {
{
- .type = RTE_FLOW_ACTION_TYPE_QUEUE,
- .conf = &(struct rte_flow_action_queue){
- .index = 0,
- },
+ .type = RTE_FLOW_ACTION_TYPE_RSS,
+ .conf = rss_conf,
},
{
.type = RTE_FLOW_ACTION_TYPE_END,
unsigned int i;
int err = 0;
+ /* Nothing to be done if there are no Rx queues. */
+ if (!queues)
+ goto error;
+ /* Prepare default RSS configuration. */
+ *rss_conf = (struct rte_flow_action_rss){
+ .rss_conf = NULL, /* Rely on default fallback settings. */
+ .num = queues,
+ };
+ for (i = 0; i != queues; ++i)
+ rss_conf->queue[i] = i;
/*
* Set up VLAN item if filtering is enabled and at least one VLAN
* filter is configured.
return ret;
}
/* Toggle the remaining flow rules. */
- for (flow = LIST_FIRST(&priv->flows);
- flow;
- flow = LIST_NEXT(flow, next)) {
+ LIST_FOREACH(flow, &priv->flows, next) {
ret = mlx4_flow_toggle(priv, flow, priv->started, error);
if (ret)
return ret;