} \
} while (0)
+#define IGB_FLEX_RAW_NUM 12
+
/**
 * Please be aware there's an assumption for all the parsers.
* rte_flow_item is using big endian, rte_flow_attr and
if (hw->mac.type == e1000_82576) {
if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(
+ struct rte_eth_ethertype_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "queue number not supported "
}
} else {
if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(
+ struct rte_eth_ethertype_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "queue number not supported "
const struct rte_flow_item_raw *raw_spec;
const struct rte_flow_item_raw *raw_mask;
const struct rte_flow_action_queue *act_q;
- uint32_t index, i, offset, total_offset = 0;
- int32_t shift;
+ uint32_t index, i, offset, total_offset;
+ uint32_t max_offset = 0;
+ int32_t shift, j, raw_index = 0;
+ int32_t relative[IGB_FLEX_RAW_NUM] = {0};
+ int32_t raw_offset[IGB_FLEX_RAW_NUM] = {0};
if (!pattern) {
rte_flow_error_set(error, EINVAL,
else
offset = 0;
- for (index = 0; index < raw_spec->length; index++) {
- if (raw_mask->pattern[index] != 0xFF) {
+ for (j = 0; j < raw_spec->length; j++) {
+ if (raw_mask->pattern[j] != 0xFF) {
memset(filter, 0, sizeof(struct rte_eth_flex_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
}
}
+ total_offset = 0;
+
+ if (raw_spec->relative) {
+ for (j = raw_index; j > 0; j--) {
+ total_offset += raw_offset[j - 1];
+ if (!relative[j - 1])
+ break;
+ }
+ if (total_offset + raw_spec->length + offset > max_offset)
+ max_offset = total_offset + raw_spec->length + offset;
+ } else {
+ if (raw_spec->length + offset > max_offset)
+ max_offset = raw_spec->length + offset;
+ }
+
if ((raw_spec->length + offset + total_offset) >
RTE_FLEX_FILTER_MAXLEN) {
memset(filter, 0, sizeof(struct rte_eth_flex_filter));
}
if (raw_spec->relative == 0) {
- for (index = 0; index < raw_spec->length; index++)
- filter->bytes[index] = raw_spec->pattern[index];
- index = offset / CHAR_BIT;
+ for (j = 0; j < raw_spec->length; j++)
+ filter->bytes[offset + j] =
+ raw_spec->pattern[j];
+ j = offset / CHAR_BIT;
+ shift = offset % CHAR_BIT;
} else {
- for (index = 0; index < raw_spec->length; index++)
- filter->bytes[total_offset + index] =
- raw_spec->pattern[index];
- index = (total_offset + offset) / CHAR_BIT;
+ for (j = 0; j < raw_spec->length; j++)
+ filter->bytes[total_offset + offset + j] =
+ raw_spec->pattern[j];
+ j = (total_offset + offset) / CHAR_BIT;
+ shift = (total_offset + offset) % CHAR_BIT;
}
i = 0;
- for (shift = offset % CHAR_BIT; shift < CHAR_BIT; shift++) {
- filter->mask[index] |= (0x80 >> shift);
+ for ( ; shift < CHAR_BIT; shift++) {
+ filter->mask[j] |= (0x80 >> shift);
i++;
if (i == raw_spec->length)
break;
if (shift == (CHAR_BIT - 1)) {
- index++;
+ j++;
shift = -1;
}
}
- total_offset += offset + raw_spec->length;
+ relative[raw_index] = raw_spec->relative;
+ raw_offset[raw_index] = offset + raw_spec->length;
+ raw_index++;
/* check if the next not void item is RAW */
index++;
goto item_loop;
}
- filter->len = RTE_ALIGN(total_offset, 8);
+ filter->len = RTE_ALIGN(max_offset, 8);
/* parse action */
index = 0;
if (!ret) {
ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter",
sizeof(struct igb_ntuple_filter_ele), 0);
- (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
+ rte_memcpy(&ntuple_filter_ptr->filter_info,
&ntuple_filter,
sizeof(struct rte_eth_ntuple_filter));
TAILQ_INSERT_TAIL(&igb_filter_ntuple_list,
ethertype_filter_ptr = rte_zmalloc(
"igb_ethertype_filter",
sizeof(struct igb_ethertype_filter_ele), 0);
- (void)rte_memcpy(ðertype_filter_ptr->filter_info,
+ rte_memcpy(ðertype_filter_ptr->filter_info,
ðertype_filter,
sizeof(struct rte_eth_ethertype_filter));
TAILQ_INSERT_TAIL(&igb_filter_ethertype_list,
if (!ret) {
syn_filter_ptr = rte_zmalloc("igb_syn_filter",
sizeof(struct igb_eth_syn_filter_ele), 0);
- (void)rte_memcpy(&syn_filter_ptr->filter_info,
+ rte_memcpy(&syn_filter_ptr->filter_info,
&syn_filter,
sizeof(struct rte_eth_syn_filter));
TAILQ_INSERT_TAIL(&igb_filter_syn_list,
if (!ret) {
flex_filter_ptr = rte_zmalloc("igb_flex_filter",
sizeof(struct igb_flex_filter_ele), 0);
- (void)rte_memcpy(&flex_filter_ptr->filter_info,
+ rte_memcpy(&flex_filter_ptr->filter_info,
&flex_filter,
sizeof(struct rte_eth_flex_filter));
TAILQ_INSERT_TAIL(&igb_filter_flex_list,
}
const struct rte_flow_ops igb_flow_ops = {
- igb_flow_validate,
- igb_flow_create,
- igb_flow_destroy,
- igb_flow_flush,
- NULL,
+ .validate = igb_flow_validate,
+ .create = igb_flow_create,
+ .destroy = igb_flow_destroy,
+ .flush = igb_flow_flush,
};