/** Storage for struct rte_flow_action_rss including external data. */
struct action_rss_data {
struct rte_flow_action_rss conf;
+ uint8_t key[RSS_HASH_KEY_LENGTH];
uint16_t queue[ACTION_RSS_QUEUE_NUM];
- struct rte_eth_rss_conf rss_conf;
- uint8_t rss_key[RSS_HASH_KEY_LENGTH];
};
/** Maximum number of subsequent tokens and arguments on the stack. */
},
[ACTION_RSS_TYPES] = {
.name = "types",
- .help = "RSS hash types",
+ .help = "specific RSS hash types",
.next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
},
[ACTION_RSS_TYPE] = {
.next = NEXT(action_rss, NEXT_ENTRY(STRING)),
.args = ARGS(ARGS_ENTRY_ARB(0, 0),
ARGS_ENTRY_ARB
- (offsetof(struct action_rss_data, rss_conf) +
- offsetof(struct rte_eth_rss_conf, rss_key_len),
- sizeof(((struct rte_eth_rss_conf *)0)->
- rss_key_len)),
- ARGS_ENTRY(struct action_rss_data, rss_key)),
+ (offsetof(struct action_rss_data, conf) +
+ offsetof(struct rte_flow_action_rss, key_len),
+ sizeof(((struct rte_flow_action_rss *)0)->
+ key_len)),
+ ARGS_ENTRY(struct action_rss_data, key)),
},
[ACTION_RSS_KEY_LEN] = {
.name = "key_len",
.help = "RSS hash key length in bytes",
.next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
.args = ARGS(ARGS_ENTRY_ARB_BOUNDED
- (offsetof(struct action_rss_data, rss_conf) +
- offsetof(struct rte_eth_rss_conf, rss_key_len),
- sizeof(((struct rte_eth_rss_conf *)0)->
- rss_key_len),
+ (offsetof(struct action_rss_data, conf) +
+ offsetof(struct rte_flow_action_rss, key_len),
+ sizeof(((struct rte_flow_action_rss *)0)->
+ key_len),
0,
RSS_HASH_KEY_LENGTH)),
},
action_rss_data = ctx->object;
*action_rss_data = (struct action_rss_data){
.conf = (struct rte_flow_action_rss){
- .rss_conf = &action_rss_data->rss_conf,
- .num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
+ .types = rss_hf,
+ .key_len = sizeof(action_rss_data->key),
+ .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
+ .key = action_rss_data->key,
.queue = action_rss_data->queue,
},
+ .key = "testpmd's default RSS hash key",
.queue = { 0 },
- .rss_conf = (struct rte_eth_rss_conf){
- .rss_key = action_rss_data->rss_key,
- .rss_key_len = sizeof(action_rss_data->rss_key),
- .rss_hf = rss_hf,
- },
- .rss_key = "testpmd's default RSS hash key",
};
- for (i = 0; i < action_rss_data->conf.num; ++i)
+ for (i = 0; i < action_rss_data->conf.queue_num; ++i)
action_rss_data->queue[i] = i;
if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
ctx->port != (portid_t)RTE_PORT_ALL) {
struct rte_eth_dev_info info;
rte_eth_dev_info_get(ctx->port, &info);
- action_rss_data->rss_conf.rss_key_len =
- RTE_MIN(sizeof(action_rss_data->rss_key),
+ action_rss_data->conf.key_len =
+ RTE_MIN(sizeof(action_rss_data->key),
info.hash_key_size);
}
action->conf = &action_rss_data->conf;
return -1;
if (!(ctx->objdata >> 16) && ctx->object) {
action_rss_data = ctx->object;
- action_rss_data->rss_conf.rss_hf = 0;
+ action_rss_data->conf.types = 0;
}
if (!strcmp_partial("end", str, len)) {
ctx->objdata &= 0xffff;
if (!ctx->object)
return len;
action_rss_data = ctx->object;
- action_rss_data->rss_conf.rss_hf |= rss_type_table[i].rss_type;
+ action_rss_data->conf.types |= rss_type_table[i].rss_type;
return len;
}
if (!ctx->object)
return len;
action_rss_data = ctx->object;
- action_rss_data->conf.num = i;
+ action_rss_data->conf.queue_num = i;
action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
return len;
}
off = 0;
if (dst.rss)
*dst.rss = (struct rte_flow_action_rss){
- .num = src.rss->num,
+ .types = src.rss->types,
+ .key_len = src.rss->key_len,
+ .queue_num = src.rss->queue_num,
};
off += sizeof(*src.rss);
- if (src.rss->num) {
+ if (src.rss->key_len) {
off = RTE_ALIGN_CEIL(off, sizeof(double));
- size = sizeof(*src.rss->queue) * src.rss->num;
+ size = sizeof(*src.rss->key) * src.rss->key_len;
if (dst.rss)
- dst.rss->queue = memcpy
+ dst.rss->key = memcpy
((void *)((uintptr_t)dst.rss + off),
- src.rss->queue, size);
+ src.rss->key, size);
off += size;
}
- off = RTE_ALIGN_CEIL(off, sizeof(double));
- if (dst.rss) {
- dst.rss->rss_conf = (void *)((uintptr_t)dst.rss + off);
- *(struct rte_eth_rss_conf *)(uintptr_t)
- dst.rss->rss_conf = (struct rte_eth_rss_conf){
- .rss_key_len = src.rss->rss_conf->rss_key_len,
- .rss_hf = src.rss->rss_conf->rss_hf,
- };
- }
- off += sizeof(*src.rss->rss_conf);
- if (src.rss->rss_conf->rss_key_len) {
+ if (src.rss->queue_num) {
off = RTE_ALIGN_CEIL(off, sizeof(double));
- size = sizeof(*src.rss->rss_conf->rss_key) *
- src.rss->rss_conf->rss_key_len;
- if (dst.rss) {
- ((struct rte_eth_rss_conf *)(uintptr_t)
- dst.rss->rss_conf)->rss_key =
- (void *)((uintptr_t)dst.rss + off);
- memcpy(dst.rss->rss_conf->rss_key,
- src.rss->rss_conf->rss_key,
- size);
- }
+ size = sizeof(*src.rss->queue) * src.rss->queue_num;
+ if (dst.rss)
+ dst.rss->queue = memcpy
+ ((void *)((uintptr_t)dst.rss + off),
+ src.rss->queue, size);
off += size;
}
size = off;
Similar to QUEUE, except RSS is additionally performed on packets to spread
them among several queues according to the provided parameters.
+Unlike global RSS settings used by other DPDK APIs, unsetting the ``types``
+field does not disable RSS in a flow rule. Doing so instead requests safe,
+unspecified "best-effort" settings from the underlying PMD, which, depending
+on the flow rule, may result in anything ranging from empty (single queue)
+to all-inclusive RSS.
+
Note: RSS hash result is stored in the ``hash.rss`` mbuf field which
overlaps ``hash.fdir.lo``. Since `Action: MARK`_ sets the ``hash.fdir.hi``
field only, both can be requested simultaneously.
.. table:: RSS
- +--------------+--------------------------------+
- | Field | Value |
- +==============+================================+
- | ``rss_conf`` | RSS parameters |
- +--------------+--------------------------------+
- | ``num`` | number of entries in ``queue`` |
- +--------------+--------------------------------+
- | ``queue`` | queue indices to use |
- +--------------+--------------------------------+
+ +---------------+---------------------------------------------+
+ | Field | Value |
+ +===============+=============================================+
+ | ``types`` | specific RSS hash types (see ``ETH_RSS_*``) |
+ +---------------+---------------------------------------------+
+ | ``key_len`` | hash key length in bytes |
+ +---------------+---------------------------------------------+
+ | ``queue_num`` | number of entries in ``queue`` |
+ +---------------+---------------------------------------------+
+ | ``key`` | hash key |
+ +---------------+---------------------------------------------+
+ | ``queue`` | queue indices to use |
+ +---------------+---------------------------------------------+
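For illustration, a minimal sketch of a rule using the reworked action; the port number, queue indices and ``ETH_RSS_IP`` selection are placeholders, and ``key_len = 0`` leaves hash key selection to the PMD::

   /* Spread matching ingress traffic over four Rx queues. */
   static const uint16_t queues[] = { 0, 1, 2, 3 };
   struct rte_flow_action_rss rss = {
           .types = ETH_RSS_IP,            /* specific hash types */
           .key_len = 0,                   /* 0: keep the PMD default key */
           .queue_num = RTE_DIM(queues),
           .key = NULL,
           .queue = queues,
   };
   struct rte_flow_attr attr = { .ingress = 1 };
   struct rte_flow_item pattern[] = {
           { .type = RTE_FLOW_ITEM_TYPE_ETH },
           { .type = RTE_FLOW_ITEM_TYPE_END },
   };
   struct rte_flow_action actions[] = {
           { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
           { .type = RTE_FLOW_ACTION_TYPE_END },
   };
   struct rte_flow_error error;
   struct rte_flow *flow = rte_flow_create(0, &attr, pattern, actions, &error);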
Action: ``PF``
^^^^^^^^^^^^^^
present.
* C99-style flexible arrays were replaced with standard pointers in RSS
action and in RAW pattern item structures due to compatibility issues.
+ * The RSS action no longer relies on the external
+ ``struct rte_eth_rss_conf``; it now exposes its own, more appropriately
+ named configuration fields directly
+ (``rss_conf->rss_key`` => ``key``,
+ ``rss_conf->rss_key_len`` => ``key_len``,
+ ``rss_conf->rss_hf`` => ``types``,
+ ``num`` => ``queue_num``).
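As a migration aid, a hedged sketch of the new initialization with the former field names noted in comments; the key and queue arrays shown are hypothetical::

    static const uint8_t hash_key[40] = { 0 /* application-provided key */ };
    static const uint16_t queues[] = { 0, 1, 2, 3 };
    static const struct rte_flow_action_rss rss = {
            .types = ETH_RSS_IP,           /* was rss_conf->rss_hf */
            .key_len = sizeof(hash_key),   /* was rss_conf->rss_key_len */
            .queue_num = RTE_DIM(queues),  /* was num */
            .key = hash_key,               /* was rss_conf->rss_key */
            .queue = queues,               /* unchanged */
    };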
ABI Changes
``rte_flow_isolate``, ``rte_flow_query`` and ``rte_flow_validate``, due to
changes in error type definitions (``enum rte_flow_error_type``), removal
of the unused DUP action (``enum rte_flow_action_type``), modified
- behavior for flow rule actions (see API changes) and removal of C99
- flexible arrays from RSS action (``struct rte_flow_action_rss``) and RAW
- pattern item (``struct rte_flow_item_raw``).
+ behavior for flow rule actions (see API changes), removal of C99 flexible
+ array from RAW pattern item (``struct rte_flow_item_raw``) and complete
+ rework of the RSS action definition (``struct rte_flow_action_rss``).
Removed Items
- ``rss``: spread packets among several queues.
- - ``types [{RSS hash type} [...]] end``: RSS hash types, allowed tokens
- are the same as `set_hash_input_set`_, an empty list means none (0).
+ - ``types [{RSS hash type} [...]] end``: specific RSS hash types; allowed
+ tokens are the same as `set_hash_input_set`_, except that an empty list
+ does not disable RSS but instead requests unspecified "best-effort"
+ settings.
- ``key {string}``: RSS hash key, overrides ``key_len``.
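For instance, a command along these lines (the pattern and queue indices are arbitrary) exercises the new tokens::

   testpmd> flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues 0 1 2 3 end / end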
#ifndef _E1000_ETHDEV_H_
#define _E1000_ETHDEV_H_
+
+#include <stdint.h>
+
+#include <rte_flow.h>
#include <rte_time.h>
#include <rte_pci.h>
#define E1000_CTRL_EXT_EXTEND_VLAN (1<<26) /* EXTENDED VLAN */
#define IGB_VFTA_SIZE 128
+#define IGB_HKEY_MAX_INDEX 10
#define IGB_MAX_RX_QUEUE_NUM 8
#define IGB_MAX_RX_QUEUE_NUM_82576 16
};
struct igb_rte_flow_rss_conf {
- struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
- uint16_t num; /**< Number of entries in queue[]. */
+ struct rte_flow_action_rss conf; /**< RSS parameters. */
+ uint8_t key[IGB_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */
uint16_t queue[IGB_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */
};
int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
struct rte_eth_flex_filter *filter,
bool add);
+int igb_rss_conf_init(struct igb_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in);
+int igb_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with);
int igb_config_rss_filter(struct rte_eth_dev *dev,
struct igb_rte_flow_rss_conf *conf,
bool add);
#define IGB_DEFAULT_TX_HTHRESH 1
#define IGB_DEFAULT_TX_WTHRESH ((hw->mac.type == e1000_82576) ? 1 : 16)
-#define IGB_HKEY_MAX_INDEX 10
-
/* Bit shift and mask */
#define IGB_4_BIT_WIDTH (CHAR_BIT / 2)
#define IGB_4_BIT_MASK RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- if (filter_info->rss_info.num)
+ if (filter_info->rss_info.conf.queue_num)
igb_config_rss_filter(dev, &filter_info->rss_info, TRUE);
}
rss = (const struct rte_flow_action_rss *)act->conf;
- if (!rss || !rss->num) {
+ if (!rss || !rss->queue_num) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
return -rte_errno;
}
- for (n = 0; n < rss->num; n++) {
+ for (n = 0; n < rss->queue_num; n++) {
if (rss->queue[n] >= dev->data->nb_rx_queues) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
}
}
- if (rss->rss_conf)
- rss_conf->rss_conf = *rss->rss_conf;
- else
- rss_conf->rss_conf.rss_hf = IGB_RSS_OFFLOAD_ALL;
-
- for (n = 0; n < rss->num; ++n)
- rss_conf->queue[n] = rss->queue[n];
- rss_conf->num = rss->num;
+ if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS hash key must be exactly 40 bytes");
+ if (rss->queue_num > RTE_DIM(rss_conf->queue))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "too many queues for RSS context");
+ if (igb_rss_conf_init(rss_conf, rss))
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS context initialization failure");
/* check if the next not void item is END */
index++;
PMD_DRV_LOG(ERR, "failed to allocate memory");
goto out;
}
- rte_memcpy(&rss_filter_ptr->filter_info,
- &rss_conf,
- sizeof(struct igb_rte_flow_rss_conf));
+ igb_rss_conf_init(&rss_filter_ptr->filter_info,
+ &rss_conf.conf);
TAILQ_INSERT_TAIL(&igb_filter_rss_list,
rss_filter_ptr, entries);
flow->rule = rss_filter_ptr;
struct e1000_filter_info *filter =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- if (filter->rss_info.num)
+ if (filter->rss_info.conf.queue_num)
igb_config_rss_filter(dev, &filter->rss_info, FALSE);
}
qinfo->conf.offloads = txq->offloads;
}
+int
+igb_rss_conf_init(struct igb_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in)
+{
+ if (in->key_len > RTE_DIM(out->key) ||
+ in->queue_num > RTE_DIM(out->queue))
+ return -EINVAL;
+ out->conf = (struct rte_flow_action_rss){
+ .types = in->types,
+ .key_len = in->key_len,
+ .queue_num = in->queue_num,
+ .key = memcpy(out->key, in->key, in->key_len),
+ .queue = memcpy(out->queue, in->queue,
+ sizeof(*in->queue) * in->queue_num),
+ };
+ return 0;
+}
+
+int
+igb_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with)
+{
+ return (comp->types == with->types &&
+ comp->key_len == with->key_len &&
+ comp->queue_num == with->queue_num &&
+ !memcmp(comp->key, with->key, with->key_len) &&
+ !memcmp(comp->queue, with->queue,
+ sizeof(*with->queue) * with->queue_num));
+}
+
int
igb_config_rss_filter(struct rte_eth_dev *dev,
struct igb_rte_flow_rss_conf *conf, bool add)
{
uint32_t shift;
uint16_t i, j;
- struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = conf->conf.key_len ?
+ (void *)(uintptr_t)conf->conf.key : NULL,
+ .rss_key_len = conf->conf.key_len,
+ .rss_hf = conf->conf.types,
+ };
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (!add) {
- if (memcmp(conf, &filter_info->rss_info,
- sizeof(struct igb_rte_flow_rss_conf)) == 0) {
+ if (igb_action_rss_same(&filter_info->rss_info.conf,
+ &conf->conf)) {
igb_rss_disable(dev);
memset(&filter_info->rss_info, 0,
sizeof(struct igb_rte_flow_rss_conf));
return -EINVAL;
}
- if (filter_info->rss_info.num)
+ if (filter_info->rss_info.conf.queue_num)
return -EINVAL;
/* Fill in redirection table. */
} reta;
uint8_t q_idx;
- if (j == conf->num)
+ if (j == conf->conf.queue_num)
j = 0;
- q_idx = conf->queue[j];
+ q_idx = conf->conf.queue[j];
reta.bytes[i & 3] = (uint8_t)(q_idx << shift);
if ((i & 3) == 3)
E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
rss_conf.rss_key = rss_intel_key; /* Default hash key */
igb_hw_rss_hash_set(hw, &rss_conf);
- rte_memcpy(&filter_info->rss_info,
- conf, sizeof(struct igb_rte_flow_rss_conf));
+ if (igb_rss_conf_init(&filter_info->rss_info, &conf->conf))
+ return -EINVAL;
return 0;
}
#include <inttypes.h>
#include <assert.h>
+#include <rte_common.h>
#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
{
struct i40e_rte_flow_rss_conf *conf =
&pf->rss_info;
- if (conf->num)
+ if (conf->conf.queue_num)
i40e_config_rss_filter(pf, conf, TRUE);
}
return ret;
}
+int
+i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in)
+{
+ if (in->key_len > RTE_DIM(out->key) ||
+ in->queue_num > RTE_DIM(out->queue))
+ return -EINVAL;
+ out->conf = (struct rte_flow_action_rss){
+ .types = in->types,
+ .key_len = in->key_len,
+ .queue_num = in->queue_num,
+ .key = memcpy(out->key, in->key, in->key_len),
+ .queue = memcpy(out->queue, in->queue,
+ sizeof(*in->queue) * in->queue_num),
+ };
+ return 0;
+}
+
+int
+i40e_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with)
+{
+ return (comp->types == with->types &&
+ comp->key_len == with->key_len &&
+ comp->queue_num == with->queue_num &&
+ !memcmp(comp->key, with->key, with->key_len) &&
+ !memcmp(comp->queue, with->queue,
+ sizeof(*with->queue) * with->queue_num));
+}
+
int
i40e_config_rss_filter(struct i40e_pf *pf,
struct i40e_rte_flow_rss_conf *conf, bool add)
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
uint32_t i, lut = 0;
uint16_t j, num;
- struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = conf->conf.key_len ?
+ (void *)(uintptr_t)conf->conf.key : NULL,
+ .rss_key_len = conf->conf.key_len,
+ .rss_hf = conf->conf.types,
+ };
struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
if (!add) {
- if (memcmp(conf, rss_info,
- sizeof(struct i40e_rte_flow_rss_conf)) == 0) {
+ if (i40e_action_rss_same(&rss_info->conf, &conf->conf)) {
i40e_pf_disable_rss(pf);
memset(rss_info, 0,
sizeof(struct i40e_rte_flow_rss_conf));
return -EINVAL;
}
- if (rss_info->num)
+ if (rss_info->conf.queue_num)
return -EINVAL;
/* If both VMDQ and RSS enabled, not all of PF queues are configured.
else
num = pf->dev_data->nb_rx_queues;
- num = RTE_MIN(num, conf->num);
+ num = RTE_MIN(num, conf->conf.queue_num);
PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
num);
for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
if (j == num)
j = 0;
- lut = (lut << 8) | (conf->queue[j] & ((0x1 <<
+ lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 <<
hw->func_caps.rss_table_entry_width) - 1));
if ((i & 3) == 3)
I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
i40e_hw_rss_hash_set(pf, &rss_conf);
- rte_memcpy(rss_info,
- conf, sizeof(struct i40e_rte_flow_rss_conf));
+ if (i40e_rss_conf_init(rss_info, &conf->conf))
+ return -EINVAL;
return 0;
}
#ifndef _I40E_ETHDEV_H_
#define _I40E_ETHDEV_H_
+#include <stdint.h>
+
#include <rte_eth_ctrl.h>
#include <rte_time.h>
#include <rte_kvargs.h>
#include <rte_hash.h>
+#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tm_driver.h>
#include "rte_pmd_i40e.h"
+#include "base/i40e_register.h"
+
#define I40E_VLAN_TAG_SIZE 4
#define I40E_AQ_LEN 32
};
struct i40e_rte_flow_rss_conf {
- struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
+ struct rte_flow_action_rss conf; /**< RSS parameters. */
uint16_t queue_region_conf; /**< Queue region config flag */
- uint16_t num; /**< Number of entries in queue[]. */
+ uint8_t key[((I40E_VFQF_HKEY_MAX_INDEX > I40E_PFQF_HKEY_MAX_INDEX ?
+ I40E_VFQF_HKEY_MAX_INDEX : I40E_PFQF_HKEY_MAX_INDEX) + 1) *
+ sizeof(uint32_t)]; /* Hash key. */
uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queues indices to use. */
};
void i40e_flex_payload_reg_set_default(struct i40e_hw *hw);
int i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len);
int i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size);
+int i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in);
+int i40e_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with);
int i40e_config_rss_filter(struct i40e_pf *pf,
struct i40e_rte_flow_rss_conf *conf, bool add);
if (action_flag) {
for (n = 0; n < 64; n++) {
- if (rss->rss_conf->rss_hf & (hf_bit << n)) {
+ if (rss->types & (hf_bit << n)) {
conf_info->region[0].hw_flowtype[0] = n;
conf_info->region[0].flowtype_num = 1;
conf_info->queue_region_number = 1;
* queue index for this port.
*/
if (conf_info->queue_region_number) {
- for (i = 0; i < rss->num; i++) {
- for (j = 0; j < rss_info->num; j++) {
- if (rss->queue[i] == rss_info->queue[j])
+ for (i = 0; i < rss->queue_num; i++) {
+ for (j = 0; j < rss_info->conf.queue_num; j++) {
+ if (rss->queue[i] == rss_info->conf.queue[j])
break;
}
- if (j == rss_info->num) {
+ if (j == rss_info->conf.queue_num) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
}
}
- for (i = 0; i < rss->num - 1; i++) {
+ for (i = 0; i < rss->queue_num - 1; i++) {
if (rss->queue[i + 1] != rss->queue[i] + 1) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
for (n = 0; n < conf_info->queue_region_number; n++) {
if (conf_info->region[n].user_priority_num ||
conf_info->region[n].flowtype_num) {
- if (!((rte_is_power_of_2(rss->num)) &&
- rss->num <= 64)) {
+ if (!((rte_is_power_of_2(rss->queue_num)) &&
+ rss->queue_num <= 64)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
}
for (i = 0; i < info->queue_region_number; i++) {
- if (info->region[i].queue_num == rss->num &&
+ if (info->region[i].queue_num ==
+ rss->queue_num &&
info->region[i].queue_start_index ==
rss->queue[0])
break;
}
info->region[i].queue_num =
- rss->num;
+ rss->queue_num;
info->region[i].queue_start_index =
rss->queue[0];
info->region[i].region_id =
if (rss_config->queue_region_conf)
return 0;
- if (!rss || !rss->num) {
+ if (!rss || !rss->queue_num) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
return -rte_errno;
}
- for (n = 0; n < rss->num; n++) {
+ for (n = 0; n < rss->queue_num; n++) {
if (rss->queue[n] >= dev->data->nb_rx_queues) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
}
/* Parse RSS related parameters from configuration */
- if (rss->rss_conf)
- rss_config->rss_conf = *rss->rss_conf;
- else
- rss_config->rss_conf.rss_hf =
- pf->adapter->flow_types_mask;
+ if (rss->key_len && rss->key_len > RTE_DIM(rss_config->key))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS hash key too large");
+ if (rss->queue_num > RTE_DIM(rss_config->queue))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "too many queues for RSS context");
+ if (i40e_rss_conf_init(rss_config, rss))
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS context initialization failure");
- for (n = 0; n < rss->num; ++n)
- rss_config->queue[n] = rss->queue[n];
- rss_config->num = rss->num;
index++;
/* check if the next not void action is END */
ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
- if (rss_info->num)
+ if (rss_info->conf.queue_num)
ret = i40e_config_rss_filter(pf, rss_info, FALSE);
return ret;
}
#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
-#define IXGBE_HKEY_MAX_INDEX 10
-
/* Additional timesync values. */
#define NSEC_PER_SEC 1000000000L
#define IXGBE_INCVAL_10GB 0x66666666
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- if (filter_info->rss_info.num)
+ if (filter_info->rss_info.conf.queue_num)
ixgbe_config_rss_filter(dev,
&filter_info->rss_info, TRUE);
}
#ifndef _IXGBE_ETHDEV_H_
#define _IXGBE_ETHDEV_H_
+
+#include <stdint.h>
+
#include "base/ixgbe_type.h"
#include "base/ixgbe_dcb.h"
#include "base/ixgbe_dcb_82599.h"
#ifdef RTE_LIBRTE_SECURITY
#include "ixgbe_ipsec.h"
#endif
+#include <rte_flow.h>
#include <rte_time.h>
#include <rte_hash.h>
#include <rte_pci.h>
#define IXGBE_EXTENDED_VLAN (uint32_t)(1 << 26) /* EXTENDED VLAN ENABLE */
#define IXGBE_VFTA_SIZE 128
#define IXGBE_VLAN_TAG_SIZE 4
+#define IXGBE_HKEY_MAX_INDEX 10
#define IXGBE_MAX_RX_QUEUE_NUM 128
#define IXGBE_MAX_INTR_QUEUE_NUM 15
#define IXGBE_VMDQ_DCB_NB_QUEUES IXGBE_MAX_RX_QUEUE_NUM
};
struct ixgbe_rte_flow_rss_conf {
- struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
- uint16_t num; /**< Number of entries in queue[]. */
+ struct rte_flow_action_rss conf; /**< RSS parameters. */
+ uint8_t key[IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */
uint16_t queue[IXGBE_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */
};
void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev);
int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t tx_rate);
+int ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in);
+int ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with);
int ixgbe_config_rss_filter(struct rte_eth_dev *dev,
struct ixgbe_rte_flow_rss_conf *conf, bool add);
rss = (const struct rte_flow_action_rss *)act->conf;
- if (!rss || !rss->num) {
+ if (!rss || !rss->queue_num) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
return -rte_errno;
}
- for (n = 0; n < rss->num; n++) {
+ for (n = 0; n < rss->queue_num; n++) {
if (rss->queue[n] >= dev->data->nb_rx_queues) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
return -rte_errno;
}
}
- if (rss->rss_conf)
- rss_conf->rss_conf = *rss->rss_conf;
- else
- rss_conf->rss_conf.rss_hf = IXGBE_RSS_OFFLOAD_ALL;
- for (n = 0; n < rss->num; ++n)
- rss_conf->queue[n] = rss->queue[n];
- rss_conf->num = rss->num;
+ if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS hash key must be exactly 40 bytes");
+ if (rss->queue_num > RTE_DIM(rss_conf->queue))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "too many queues for RSS context");
+ if (ixgbe_rss_conf_init(rss_conf, rss))
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS context initialization failure");
/* check if the next not void item is END */
act = next_no_void_action(actions, act);
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- if (filter_info->rss_info.num)
+ if (filter_info->rss_info.conf.queue_num)
ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
}
PMD_DRV_LOG(ERR, "failed to allocate memory");
goto out;
}
- rte_memcpy(&rss_filter_ptr->filter_info,
- &rss_conf,
- sizeof(struct ixgbe_rte_flow_rss_conf));
+ ixgbe_rss_conf_init(&rss_filter_ptr->filter_info,
+ &rss_conf.conf);
TAILQ_INSERT_TAIL(&filter_rss_list,
rss_filter_ptr, entries);
flow->rule = rss_filter_ptr;
}
}
+int
+ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in)
+{
+ if (in->key_len > RTE_DIM(out->key) ||
+ in->queue_num > RTE_DIM(out->queue))
+ return -EINVAL;
+ out->conf = (struct rte_flow_action_rss){
+ .types = in->types,
+ .key_len = in->key_len,
+ .queue_num = in->queue_num,
+ .key = memcpy(out->key, in->key, in->key_len),
+ .queue = memcpy(out->queue, in->queue,
+ sizeof(*in->queue) * in->queue_num),
+ };
+ return 0;
+}
+
+int
+ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with)
+{
+ return (comp->types == with->types &&
+ comp->key_len == with->key_len &&
+ comp->queue_num == with->queue_num &&
+ !memcmp(comp->key, with->key, with->key_len) &&
+ !memcmp(comp->queue, with->queue,
+ sizeof(*with->queue) * with->queue_num));
+}
+
int
ixgbe_config_rss_filter(struct rte_eth_dev *dev,
struct ixgbe_rte_flow_rss_conf *conf, bool add)
uint16_t j;
uint16_t sp_reta_size;
uint32_t reta_reg;
- struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = conf->conf.key_len ?
+ (void *)(uintptr_t)conf->conf.key : NULL,
+ .rss_key_len = conf->conf.key_len,
+ .rss_hf = conf->conf.types,
+ };
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
if (!add) {
- if (memcmp(conf, &filter_info->rss_info,
- sizeof(struct ixgbe_rte_flow_rss_conf)) == 0) {
+ if (ixgbe_action_rss_same(&filter_info->rss_info.conf,
+ &conf->conf)) {
ixgbe_rss_disable(dev);
memset(&filter_info->rss_info, 0,
sizeof(struct ixgbe_rte_flow_rss_conf));
return -EINVAL;
}
- if (filter_info->rss_info.num)
+ if (filter_info->rss_info.conf.queue_num)
return -EINVAL;
/* Fill in redirection table
* The byte-swap is needed because NIC registers are in
for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
- if (j == conf->num)
+ if (j == conf->conf.queue_num)
j = 0;
- reta = (reta << 8) | conf->queue[j];
+ reta = (reta << 8) | conf->conf.queue[j];
if ((i & 3) == 3)
IXGBE_WRITE_REG(hw, reta_reg,
rte_bswap32(reta));
rss_conf.rss_key = rss_intel_key; /* Default hash key */
ixgbe_hw_rss_hash_set(hw, &rss_conf);
- rte_memcpy(&filter_info->rss_info,
- conf, sizeof(struct ixgbe_rte_flow_rss_conf));
+ if (ixgbe_rss_conf_init(&filter_info->rss_info, &conf->conf))
+ return -EINVAL;
return 0;
}
" for UDP RSS and inner VXLAN RSS");
/* Fake support for all possible RSS hash fields. */
priv->hw_rss_sup = ~UINT64_C(0);
- priv->hw_rss_sup = mlx4_conv_rss_hf(priv, -1);
+ priv->hw_rss_sup = mlx4_conv_rss_types(priv, -1);
/* Filter out known unsupported fields. */
priv->hw_rss_sup &=
~(uint64_t)(IBV_RX_HASH_SRC_PORT_UDP |
};
/**
- * Convert DPDK RSS hash fields to their Verbs equivalent.
+ * Convert DPDK RSS hash types to their Verbs equivalent.
*
- * This function returns the supported (default) set when @p rss_hf has
+ * This function returns the supported (default) set when @p types has
* special value (uint64_t)-1.
*
* @param priv
* Pointer to private structure.
- * @param rss_hf
- * Hash fields in DPDK format (see struct rte_eth_rss_conf).
+ * @param types
+ * Hash types in DPDK format (see struct rte_eth_rss_conf).
*
* @return
* A valid Verbs RSS hash fields mask for mlx4 on success, (uint64_t)-1
* otherwise and rte_errno is set.
*/
uint64_t
-mlx4_conv_rss_hf(struct priv *priv, uint64_t rss_hf)
+mlx4_conv_rss_types(struct priv *priv, uint64_t types)
{
enum { IPV4, IPV6, TCP, UDP, };
const uint64_t in[] = {
unsigned int i;
for (i = 0; i != RTE_DIM(in); ++i)
- if (rss_hf & in[i]) {
- seen |= rss_hf & in[i];
+ if (types & in[i]) {
+ seen |= types & in[i];
conv |= out[i];
}
if ((conv & priv->hw_rss_sup) == conv) {
- if (rss_hf == (uint64_t)-1) {
+ if (types == (uint64_t)-1) {
/* Include inner RSS by default if supported. */
conv |= priv->hw_rss_sup & IBV_RX_HASH_INNER;
return conv;
}
- if (!(rss_hf & ~seen))
+ if (!(types & ~seen))
return conv;
}
rte_errno = ENOTSUP;
switch (action->type) {
const struct rte_flow_action_queue *queue;
const struct rte_flow_action_rss *rss;
- const struct rte_eth_rss_conf *rss_conf;
+ const uint8_t *rss_key;
+ uint32_t rss_key_len;
uint64_t fields;
unsigned int i;
break;
rss = action->conf;
/* Default RSS configuration if none is provided. */
- rss_conf =
- rss->rss_conf ?
- rss->rss_conf :
- &(struct rte_eth_rss_conf){
- .rss_key = mlx4_rss_hash_key_default,
- .rss_key_len = MLX4_RSS_HASH_KEY_SIZE,
- .rss_hf = -1,
- };
+ if (rss->key_len) {
+ rss_key = rss->key;
+ rss_key_len = rss->key_len;
+ } else {
+ rss_key = mlx4_rss_hash_key_default;
+ rss_key_len = MLX4_RSS_HASH_KEY_SIZE;
+ }
/* Sanity checks. */
- for (i = 0; i < rss->num; ++i)
+ for (i = 0; i < rss->queue_num; ++i)
if (rss->queue[i] >=
priv->dev->data->nb_rx_queues)
break;
- if (i != rss->num) {
+ if (i != rss->queue_num) {
msg = "queue index target beyond number of"
" configured Rx queues";
goto exit_action_not_supported;
}
- if (!rte_is_power_of_2(rss->num)) {
+ if (!rte_is_power_of_2(rss->queue_num)) {
msg = "for RSS, mlx4 requires the number of"
" queues to be a power of two";
goto exit_action_not_supported;
}
- if (rss_conf->rss_key_len !=
- sizeof(flow->rss->key)) {
+ if (rss_key_len != sizeof(flow->rss->key)) {
msg = "mlx4 supports exactly one RSS hash key"
" length: "
MLX4_STR_EXPAND(MLX4_RSS_HASH_KEY_SIZE);
goto exit_action_not_supported;
}
- for (i = 1; i < rss->num; ++i)
+ for (i = 1; i < rss->queue_num; ++i)
if (rss->queue[i] - rss->queue[i - 1] != 1)
break;
- if (i != rss->num) {
+ if (i != rss->queue_num) {
msg = "mlx4 requires RSS contexts to use"
" consecutive queue indices only";
goto exit_action_not_supported;
}
- if (rss->queue[0] % rss->num) {
+ if (rss->queue[0] % rss->queue_num) {
msg = "mlx4 requires the first queue of a RSS"
" context to be aligned on a multiple"
" of the context size";
goto exit_action_not_supported;
}
rte_errno = 0;
- fields = mlx4_conv_rss_hf(priv, rss_conf->rss_hf);
+ fields = mlx4_conv_rss_types(priv, rss->types);
if (fields == (uint64_t)-1 && rte_errno) {
msg = "unsupported RSS hash type requested";
goto exit_action_not_supported;
}
flow->rss = mlx4_rss_get
- (priv, fields, rss_conf->rss_key, rss->num,
+ (priv, fields, rss_key, rss->queue_num,
rss->queue);
if (!flow->rss) {
msg = "either invalid parameters or not enough"
rte_align32pow2(priv->dev->data->nb_rx_queues + 1) >> 1;
uint16_t queue[queues];
struct rte_flow_action_rss action_rss = {
- .rss_conf = NULL, /* Rely on default fallback settings. */
- .num = queues,
+ .types = -1,
+ .key_len = MLX4_RSS_HASH_KEY_SIZE,
+ .queue_num = queues,
+ .key = mlx4_rss_hash_key_default,
.queue = queue,
};
struct rte_flow_action actions[] = {
/* mlx4_flow.c */
-uint64_t mlx4_conv_rss_hf(struct priv *priv, uint64_t rss_hf);
+uint64_t mlx4_conv_rss_types(struct priv *priv, uint64_t types);
int mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error);
void mlx4_flow_clean(struct priv *priv);
int mlx4_filter_ctrl(struct rte_eth_dev *dev,
*/
struct mlx4_rss *
mlx4_rss_get(struct priv *priv, uint64_t fields,
- uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
+ const uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
uint16_t queues, const uint16_t queue_id[])
{
struct mlx4_rss *rss;
int mlx4_rss_init(struct priv *priv);
void mlx4_rss_deinit(struct priv *priv);
struct mlx4_rss *mlx4_rss_get(struct priv *priv, uint64_t fields,
- uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
+ const uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
uint16_t queues, const uint16_t queue_id[]);
void mlx4_rss_put(struct mlx4_rss *rss);
int mlx4_rss_attach(struct mlx4_rss *rss);
TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
uint32_t mark:1; /**< Set if the flow is marked. */
uint32_t drop:1; /**< Drop queue. */
- uint16_t queues_n; /**< Number of entries in queue[]. */
+ struct rte_flow_action_rss rss_conf; /**< RSS configuration */
uint16_t (*queues)[]; /**< Queues indexes to use. */
- struct rte_eth_rss_conf rss_conf; /**< RSS configuration */
uint8_t rss_key[40]; /**< copy of the RSS key. */
struct ibv_counter_set *cs; /**< Holds the counters for the rule. */
struct mlx5_flow_counter_stats counter_stats;/**<The counter stats. */
uint32_t mark:1; /**< Mark is present in the flow. */
uint32_t count:1; /**< Count is present in the flow. */
uint32_t mark_id; /**< Mark identifier. */
+ struct rte_flow_action_rss rss_conf; /**< RSS configuration */
uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */
- uint16_t queues_n; /**< Number of entries in queue[]. */
- struct rte_eth_rss_conf rss_conf; /**< RSS configuration */
uint8_t rss_key[40]; /**< copy of the RSS key. */
enum hash_rxq_type layer; /**< Last pattern layer detected. */
struct ibv_counter_set *cs; /**< Holds the counter set for the rule */
return 0;
}
-/**
- * Copy the RSS configuration from the user ones, of the rss_conf is null,
- * uses the driver one.
- *
- * @param parser
- * Internal parser structure.
- * @param rss_conf
- * User RSS configuration to save.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx5_flow_convert_rss_conf(struct mlx5_flow_parse *parser,
- const struct rte_eth_rss_conf *rss_conf)
-{
- /*
- * This function is also called at the beginning of
- * mlx5_flow_convert_actions() to initialize the parser with the
- * device default RSS configuration.
- */
- if (rss_conf) {
- if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) {
- rte_errno = EINVAL;
- return -rte_errno;
- }
- if (rss_conf->rss_key_len != 40) {
- rte_errno = EINVAL;
- return -rte_errno;
- }
- if (rss_conf->rss_key_len && rss_conf->rss_key) {
- parser->rss_conf.rss_key_len = rss_conf->rss_key_len;
- memcpy(parser->rss_key, rss_conf->rss_key,
- rss_conf->rss_key_len);
- parser->rss_conf.rss_key = parser->rss_key;
- }
- parser->rss_conf.rss_hf = rss_conf->rss_hf;
- }
- return 0;
-}
-
/**
* Extract attribute to the parser.
*
enum { FATE = 1, MARK = 2, COUNT = 4, };
uint32_t overlap = 0;
struct priv *priv = dev->data->dev_private;
- int ret;
- /*
- * Add default RSS configuration necessary for Verbs to create QP even
- * if no RSS is necessary.
- */
- ret = mlx5_flow_convert_rss_conf(parser,
- (const struct rte_eth_rss_conf *)
- &priv->rss_conf);
- if (ret)
- return ret;
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
continue;
overlap |= FATE;
if (!queue || (queue->index > (priv->rxqs_n - 1)))
goto exit_action_not_supported;
- parser->queues_n = 1;
parser->queues[0] = queue->index;
+ parser->rss_conf = (struct rte_flow_action_rss){
+ .queue_num = 1,
+ .queue = parser->queues,
+ };
} else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
const struct rte_flow_action_rss *rss =
(const struct rte_flow_action_rss *)
actions->conf;
+ const uint8_t *rss_key;
+ uint32_t rss_key_len;
uint16_t n;
if (overlap & FATE)
goto exit_action_overlap;
overlap |= FATE;
- if (!rss || !rss->num) {
+ if (rss->types & MLX5_RSS_HF_MASK) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "unsupported RSS type"
+ " requested");
+ return -rte_errno;
+ }
+ if (rss->key_len) {
+ rss_key_len = rss->key_len;
+ rss_key = rss->key;
+ } else {
+ rss_key_len = rss_hash_default_key_len;
+ rss_key = rss_hash_default_key;
+ }
+ if (rss_key_len != RTE_DIM(parser->rss_key)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "RSS hash key must be"
+ " exactly 40 bytes long");
+ return -rte_errno;
+ }
+ if (!rss->queue_num) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
"no valid queues");
return -rte_errno;
}
- if (rss->num > RTE_DIM(parser->queues)) {
+ if (rss->queue_num > RTE_DIM(parser->queues)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
" context");
return -rte_errno;
}
- for (n = 0; n < rss->num; ++n) {
+ for (n = 0; n < rss->queue_num; ++n) {
if (rss->queue[n] >= priv->rxqs_n) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
return -rte_errno;
}
}
- for (n = 0; n < rss->num; ++n)
- parser->queues[n] = rss->queue[n];
- parser->queues_n = rss->num;
- if (mlx5_flow_convert_rss_conf(parser, rss->rss_conf)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "wrong RSS configuration");
- return -rte_errno;
- }
+ parser->rss_conf = (struct rte_flow_action_rss){
+ .types = rss->types,
+ .key_len = rss_key_len,
+ .queue_num = rss->queue_num,
+ .key = memcpy(parser->rss_key, rss_key,
+ sizeof(*rss_key) * rss_key_len),
+ .queue = memcpy(parser->queues, rss->queue,
+ sizeof(*rss->queue) *
+ rss->queue_num),
+ };
} else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
const struct rte_flow_action_mark *mark =
(const struct rte_flow_action_mark *)
parser->drop = 1;
if (parser->drop && parser->mark)
parser->mark = 0;
- if (!parser->queues_n && !parser->drop) {
+ if (!parser->rss_conf.queue_num && !parser->drop) {
rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "no valid action");
return -rte_errno;
unsigned int i;
/* Remove any other flow not matching the pattern. */
- if (parser->queues_n == 1 && !parser->rss_conf.rss_hf) {
+ if (parser->rss_conf.queue_num == 1 && !parser->rss_conf.types) {
for (i = 0; i != hash_rxq_init_n; ++i) {
if (i == HASH_RXQ_ETH)
continue;
}
/* Remove impossible flow according to the RSS configuration. */
if (hash_rxq_init[parser->layer].dpdk_rss_hf &
- parser->rss_conf.rss_hf) {
+ parser->rss_conf.types) {
/* Remove any other flow. */
for (i = hmin; i != (hmax + 1); ++i) {
if ((i == parser->layer) ||
}
} else if (!parser->queue[ip].ibv_attr) {
/* no RSS possible with the current configuration. */
- parser->queues_n = 1;
+ parser->rss_conf.queue_num = 1;
return;
}
fill:
for (i = 0; i != hash_rxq_init_n; ++i) {
unsigned int offset;
- if (!(parser->rss_conf.rss_hf &
+ if (!(parser->rss_conf.types &
hash_rxq_init[i].dpdk_rss_hf) &&
(i != HASH_RXQ_ETH))
continue;
continue;
flow->frxq[i].hrxq =
mlx5_hrxq_get(dev,
- parser->rss_conf.rss_key,
- parser->rss_conf.rss_key_len,
+ parser->rss_conf.key,
+ parser->rss_conf.key_len,
hash_fields,
- parser->queues,
- parser->queues_n);
+ parser->rss_conf.queue,
+ parser->rss_conf.queue_num);
if (flow->frxq[i].hrxq)
continue;
flow->frxq[i].hrxq =
mlx5_hrxq_new(dev,
- parser->rss_conf.rss_key,
- parser->rss_conf.rss_key_len,
+ parser->rss_conf.key,
+ parser->rss_conf.key_len,
hash_fields,
- parser->queues,
- parser->queues_n);
+ parser->rss_conf.queue,
+ parser->rss_conf.queue_num);
if (!flow->frxq[i].hrxq) {
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "internal error in flow creation");
goto error;
}
- for (i = 0; i != parser->queues_n; ++i) {
+ for (i = 0; i != parser->rss_conf.queue_num; ++i) {
struct mlx5_rxq_data *q =
- (*priv->rxqs)[parser->queues[i]];
+ (*priv->rxqs)[parser->rss_conf.queue[i]];
q->mark |= parser->mark;
}
if (ret)
goto exit;
flow = rte_calloc(__func__, 1,
- sizeof(*flow) + parser.queues_n * sizeof(uint16_t),
+ sizeof(*flow) +
+ parser.rss_conf.queue_num * sizeof(uint16_t),
0);
if (!flow) {
rte_flow_error_set(error, ENOMEM,
"cannot allocate flow memory");
return NULL;
}
- /* Copy queues configuration. */
+ /* Copy configuration. */
flow->queues = (uint16_t (*)[])(flow + 1);
- memcpy(flow->queues, parser.queues, parser.queues_n * sizeof(uint16_t));
- flow->queues_n = parser.queues_n;
+ flow->rss_conf = (struct rte_flow_action_rss){
+ .types = parser.rss_conf.types,
+ .key_len = parser.rss_conf.key_len,
+ .queue_num = parser.rss_conf.queue_num,
+ .key = memcpy(flow->rss_key, parser.rss_conf.key,
+ sizeof(*parser.rss_conf.key) *
+ parser.rss_conf.key_len),
+ .queue = memcpy(flow->queues, parser.rss_conf.queue,
+ sizeof(*parser.rss_conf.queue) *
+ parser.rss_conf.queue_num),
+ };
flow->mark = parser.mark;
- /* Copy RSS configuration. */
- flow->rss_conf = parser.rss_conf;
- flow->rss_conf.rss_key = flow->rss_key;
- memcpy(flow->rss_key, parser.rss_key, parser.rss_conf.rss_key_len);
/* finalise the flow. */
if (parser.drop)
ret = mlx5_flow_create_action_queue_drop(dev, &parser, flow,
if (flow->drop || !flow->mark)
goto free;
- for (i = 0; i != flow->queues_n; ++i) {
+ for (i = 0; i != flow->rss_conf.queue_num; ++i) {
struct rte_flow *tmp;
int mark = 0;
if (!flow->frxq[i].ibv_attr)
continue;
flow->frxq[i].hrxq =
- mlx5_hrxq_get(dev, flow->rss_conf.rss_key,
- flow->rss_conf.rss_key_len,
+ mlx5_hrxq_get(dev, flow->rss_conf.key,
+ flow->rss_conf.key_len,
hash_rxq_init[i].hash_fields,
- (*flow->queues),
- flow->queues_n);
+ flow->rss_conf.queue,
+ flow->rss_conf.queue_num);
if (flow->frxq[i].hrxq)
goto flow_create;
flow->frxq[i].hrxq =
- mlx5_hrxq_new(dev, flow->rss_conf.rss_key,
- flow->rss_conf.rss_key_len,
+ mlx5_hrxq_new(dev, flow->rss_conf.key,
+ flow->rss_conf.key_len,
hash_rxq_init[i].hash_fields,
- (*flow->queues),
- flow->queues_n);
+ flow->rss_conf.queue,
+ flow->rss_conf.queue_num);
if (!flow->frxq[i].hrxq) {
DRV_LOG(DEBUG,
"port %u flow %p cannot be applied",
}
if (!flow->mark)
continue;
- for (i = 0; i != flow->queues_n; ++i)
- (*priv->rxqs)[(*flow->queues)[i]]->mark = 1;
+ for (i = 0; i != flow->rss_conf.queue_num; ++i)
+ (*priv->rxqs)[flow->rss_conf.queue[i]]->mark = 1;
}
return 0;
}
};
uint16_t queue[priv->reta_idx_n];
struct rte_flow_action_rss action_rss = {
- .rss_conf = &priv->rss_conf,
- .num = priv->reta_idx_n,
+ .types = priv->rss_conf.rss_hf,
+ .key_len = priv->rss_conf.rss_key_len,
+ .queue_num = priv->reta_idx_n,
+ .key = priv->rss_conf.rss_key,
.queue = queue,
};
struct rte_flow_action actions[] = {
* The Verbs object initialised, NULL otherwise and rte_errno is set.
*/
struct mlx5_ind_table_ibv *
-mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[],
- uint16_t queues_n)
+mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
+ uint32_t queues_n)
{
struct priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
* An indirection table if found.
*/
struct mlx5_ind_table_ibv *
-mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, uint16_t queues[],
- uint16_t queues_n)
+mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
+ uint32_t queues_n)
{
struct priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
* The Verbs object initialised, NULL otherwise and rte_errno is set.
*/
struct mlx5_hrxq *
-mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
- uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
+mlx5_hrxq_new(struct rte_eth_dev *dev,
+ const uint8_t *rss_key, uint32_t rss_key_len,
+ uint64_t hash_fields,
+ const uint16_t *queues, uint32_t queues_n)
{
struct priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
rte_errno = ENOMEM;
return NULL;
}
+ if (!rss_key_len) {
+ rss_key_len = rss_hash_default_key_len;
+ rss_key = rss_hash_default_key;
+ }
qp = mlx5_glue->create_qp_ex
(priv->ctx,
&(struct ibv_qp_init_attr_ex){
.rx_hash_conf = (struct ibv_rx_hash_conf){
.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
.rx_hash_key_len = rss_key_len,
- .rx_hash_key = rss_key,
+ .rx_hash_key = (void *)(uintptr_t)rss_key,
.rx_hash_fields_mask = hash_fields,
},
.rwq_ind_tbl = ind_tbl->ind_table,
* An hash Rx queue on success.
*/
struct mlx5_hrxq *
-mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
- uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
+mlx5_hrxq_get(struct rte_eth_dev *dev,
+ const uint8_t *rss_key, uint32_t rss_key_len,
+ uint64_t hash_fields,
+ const uint16_t *queues, uint32_t queues_n)
{
struct priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
LIST_ENTRY(mlx5_ind_table_ibv) next; /* Pointer to the next element. */
rte_atomic32_t refcnt; /* Reference counter. */
struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
- uint16_t queues_n; /**< Number of queues in the list. */
+ uint32_t queues_n; /**< Number of queues in the list. */
uint16_t queues[]; /**< Queue list. */
};
struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
struct ibv_qp *qp; /* Verbs queue pair. */
uint64_t hash_fields; /* Verbs Hash fields. */
- uint8_t rss_key_len; /* Hash key length in bytes. */
+ uint32_t rss_key_len; /* Hash key length in bytes. */
uint8_t rss_key[]; /* Hash key. */
};
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_new(struct rte_eth_dev *dev,
- uint16_t queues[],
- uint16_t queues_n);
+ const uint16_t *queues,
+ uint32_t queues_n);
struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_get(struct rte_eth_dev *dev,
- uint16_t queues[],
- uint16_t queues_n);
+ const uint16_t *queues,
+ uint32_t queues_n);
int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
struct mlx5_ind_table_ibv *ind_tbl);
int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
-struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key,
- uint8_t rss_key_len, uint64_t hash_fields,
- uint16_t queues[], uint16_t queues_n);
-struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key,
- uint8_t rss_key_len, uint64_t hash_fields,
- uint16_t queues[], uint16_t queues_n);
+struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
+ const uint8_t *rss_key, uint32_t rss_key_len,
+ uint64_t hash_fields,
+ const uint16_t *queues, uint32_t queues_n);
+struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
+ const uint8_t *rss_key, uint32_t rss_key_len,
+ uint64_t hash_fields,
+ const uint16_t *queues, uint32_t queues_n);
int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);
int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev);
uint64_t mlx5_get_rx_port_offloads(void);
struct sfc_rxq *rxq;
unsigned int rxq_hw_index_min;
unsigned int rxq_hw_index_max;
- const struct rte_eth_rss_conf *rss_conf = rss->rss_conf;
- uint64_t rss_hf;
- uint8_t *rss_key = NULL;
+ const uint8_t *rss_key;
struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
unsigned int i;
- if (rss->num == 0)
+ if (rss->queue_num == 0)
return -EINVAL;
rxq_sw_index = sa->rxq_count - 1;
rxq_hw_index_min = rxq->hw_index;
rxq_hw_index_max = 0;
- for (i = 0; i < rss->num; ++i) {
+ for (i = 0; i < rss->queue_num; ++i) {
rxq_sw_index = rss->queue[i];
if (rxq_sw_index >= sa->rxq_count)
rxq_hw_index_max = rxq->hw_index;
}
- rss_hf = (rss_conf != NULL) ? rss_conf->rss_hf : SFC_RSS_OFFLOADS;
- if ((rss_hf & ~SFC_RSS_OFFLOADS) != 0)
+ if ((rss->types & ~SFC_RSS_OFFLOADS) != 0)
return -EINVAL;
- if (rss_conf != NULL) {
- if (rss_conf->rss_key_len != sizeof(sa->rss_key))
+ if (rss->key_len) {
+ if (rss->key_len != sizeof(sa->rss_key))
return -EINVAL;
- rss_key = rss_conf->rss_key;
+ rss_key = rss->key;
} else {
rss_key = sa->rss_key;
}
sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
- sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss_hf);
+ sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss->types);
rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(sa->rss_key));
for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
- unsigned int rxq_sw_index = rss->queue[i % rss->num];
+ unsigned int rxq_sw_index = rss->queue[i % rss->queue_num];
struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;
sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
if (err)
goto exit_action_not_supported;
}
- if (flow && rss)
+ if (flow)
err = rss_add_actions(flow, pmd, rss, error);
} else {
goto exit_action_not_supported;
struct rte_flow_error *error)
{
/* 4096 is the maximum number of instructions for a BPF program */
- int i;
+ unsigned int i;
int err;
struct rss_key rss_entry = { .hash_fields = 0,
.key_size = 0 };
}
/* Update RSS map entry with queues */
- rss_entry.nb_queues = rss->num;
- for (i = 0; i < rss->num; i++)
+ rss_entry.nb_queues = rss->queue_num;
+ for (i = 0; i < rss->queue_num; i++)
rss_entry.queues[i] = rss->queue[i];
rss_entry.hash_fields =
(1 << HASH_FIELD_IPV4_L3_L4) | (1 << HASH_FIELD_IPV6_L3_L4);
i < eth_dev->data->nb_rx_queues; ++i)
if (eth_dev->data->rx_queues[i])
queue[j++] = i;
- action_rss.rss_conf = &rss_conf;
- action_rss.num = j;
- action_rss.queue = queue;
+ action_rss = (struct rte_flow_action_rss){
+ .types = rss_conf.rss_hf,
+ .key_len = rss_conf.rss_key_len,
+ .queue_num = j,
+ .key = rss_key,
+ .queue = queue,
+ };
ret = rte_flow_validate(sa->portid, &sa->attr,
sa->pattern, sa->action,
&err);
off = 0;
if (dst.rss)
*dst.rss = (struct rte_flow_action_rss){
- .num = src.rss->num,
+ .types = src.rss->types,
+ .key_len = src.rss->key_len,
+ .queue_num = src.rss->queue_num,
};
off += sizeof(*src.rss);
- if (src.rss->num) {
+ if (src.rss->key_len) {
off = RTE_ALIGN_CEIL(off, sizeof(double));
- size = sizeof(*src.rss->queue) * src.rss->num;
+ size = sizeof(*src.rss->key) * src.rss->key_len;
if (dst.rss)
- dst.rss->queue = memcpy
+ dst.rss->key = memcpy
((void *)((uintptr_t)dst.rss + off),
- src.rss->queue, size);
+ src.rss->key, size);
off += size;
}
- off = RTE_ALIGN_CEIL(off, sizeof(double));
- if (dst.rss) {
- dst.rss->rss_conf = (void *)((uintptr_t)dst.rss + off);
- *(struct rte_eth_rss_conf *)(uintptr_t)
- dst.rss->rss_conf = (struct rte_eth_rss_conf){
- .rss_key_len = src.rss->rss_conf->rss_key_len,
- .rss_hf = src.rss->rss_conf->rss_hf,
- };
- }
- off += sizeof(*src.rss->rss_conf);
- if (src.rss->rss_conf->rss_key_len) {
+ if (src.rss->queue_num) {
off = RTE_ALIGN_CEIL(off, sizeof(double));
- size = sizeof(*src.rss->rss_conf->rss_key) *
- src.rss->rss_conf->rss_key_len;
- if (dst.rss) {
- ((struct rte_eth_rss_conf *)(uintptr_t)
- dst.rss->rss_conf)->rss_key =
- (void *)((uintptr_t)dst.rss + off);
- memcpy(dst.rss->rss_conf->rss_key,
- src.rss->rss_conf->rss_key,
- size);
- }
+ size = sizeof(*src.rss->queue) * src.rss->queue_num;
+ if (dst.rss)
+ dst.rss->queue = memcpy
+ ((void *)((uintptr_t)dst.rss + off),
+ src.rss->queue, size);
off += size;
}
size = off;
* Similar to QUEUE, except RSS is additionally performed on packets to
* spread them among several queues according to the provided parameters.
*
+ * Unlike global RSS settings used by other DPDK APIs, unsetting the
+ * @p types field does not disable RSS in a flow rule. Doing so instead
+ * requests safe, unspecified "best-effort" settings from the underlying PMD,
+ * which, depending on the flow rule, may result in anything ranging from
+ * empty (single queue) to all-inclusive RSS.
+ *
* Note: RSS hash result is stored in the hash.rss mbuf field which overlaps
* hash.fdir.lo. Since the MARK action sets the hash.fdir.hi field only,
* both can be requested simultaneously.
*/
struct rte_flow_action_rss {
- const struct rte_eth_rss_conf *rss_conf; /**< RSS parameters. */
- uint16_t num; /**< Number of entries in @p queue. */
+ uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+ uint32_t key_len; /**< Hash key length in bytes. */
+ uint32_t queue_num; /**< Number of entries in @p queue. */
+ const uint8_t *key; /**< Hash key. */
const uint16_t *queue; /**< Queue indices to use. */
};