X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fe1000%2Figb_flow.c;h=76c0c520f8a184e3d9ea6b58cd1b6856fced9df1;hb=821f86e0f4d4c773316f537c16b46c9398a9060b;hp=43eebba87c07872312d68cc5d49678f5b38da4c7;hpb=a8600af437389d8b65f99c22335bff47810c6a69;p=dpdk.git diff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c index 43eebba87c..76c0c520f8 100644 --- a/drivers/net/e1000/igb_flow.c +++ b/drivers/net/e1000/igb_flow.c @@ -77,6 +77,8 @@ } \ } while (0) +#define IGB_FLEX_RAW_NUM 12 + /** * Please aware there's an asumption for all the parsers. * rte_flow_item is using big endian, rte_flow_attr and @@ -692,7 +694,8 @@ igb_parse_ethertype_filter(struct rte_eth_dev *dev, if (hw->mac.type == e1000_82576) { if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof( + struct rte_eth_ethertype_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "queue number not supported " @@ -701,7 +704,8 @@ igb_parse_ethertype_filter(struct rte_eth_dev *dev, } } else { if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) { - memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + memset(filter, 0, sizeof( + struct rte_eth_ethertype_filter)); rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "queue number not supported " @@ -738,6 +742,695 @@ igb_parse_ethertype_filter(struct rte_eth_dev *dev, return 0; } +/** + * Parse the rule to see if it is a TCP SYN rule. + * And get the TCP SYN filter info BTW. + * pattern: + * The first not void item must be ETH. + * The second not void item must be IPV4 or IPV6. + * The third not void item must be TCP. + * The next not void item must be END. + * action: + * The first not void action should be QUEUE. + * The next not void action should be END. + * pattern example: + * ITEM Spec Mask + * ETH NULL NULL + * IPV4/IPV6 NULL NULL + * TCP tcp_flags 0x02 0xFF + * END + * other members in mask and spec should set to 0x00. + * item->last should be NULL. 
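+ * The QUEUE action selects the RX queue that matching TCP SYN packets
+ * are routed to. attr->priority must be either 0 (lowest priority) or
+ * UINT32_MAX (highest priority); any other value is rejected.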
+ */ +static int +cons_parse_syn_filter(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_syn_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + const struct rte_flow_action *act; + const struct rte_flow_item_tcp *tcp_spec; + const struct rte_flow_item_tcp *tcp_mask; + const struct rte_flow_action_queue *act_q; + uint32_t index; + + if (!pattern) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + /* parse pattern */ + index = 0; + + /* the first not void item should be MAC or IPv4 or IPv6 or TCP */ + NEXT_ITEM_OF_PATTERN(item, pattern, index); + if (item->type != RTE_FLOW_ITEM_TYPE_ETH && + item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_IPV6 && + item->type != RTE_FLOW_ITEM_TYPE_TCP) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by syn filter"); + return -rte_errno; + } + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + /* Skip Ethernet */ + if (item->type == RTE_FLOW_ITEM_TYPE_ETH) { + /* if the item is MAC, the content should be NULL */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid SYN address mask"); + return -rte_errno; + } + + /* check if the next not void item is IPv4 or IPv6 */ + index++; + NEXT_ITEM_OF_PATTERN(item, pattern, index); + if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 && + item->type != RTE_FLOW_ITEM_TYPE_IPV6) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by syn filter"); + return -rte_errno; + } + } + + /* Skip IP */ + if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 || + item->type == RTE_FLOW_ITEM_TYPE_IPV6) { + /* if the item is IP, the content should be NULL */ + if (item->spec || item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid SYN mask"); + return -rte_errno; + } + + /* check if the next not void item is TCP */ + index++; + NEXT_ITEM_OF_PATTERN(item, pattern, index); + if (item->type != RTE_FLOW_ITEM_TYPE_TCP) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by syn filter"); + return -rte_errno; + } + } + + /* Get the TCP info. Only support SYN. 
*/ + if (!item->spec || !item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid SYN mask"); + return -rte_errno; + } + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + tcp_spec = (const struct rte_flow_item_tcp *)item->spec; + tcp_mask = (const struct rte_flow_item_tcp *)item->mask; + if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) || + tcp_mask->hdr.src_port || + tcp_mask->hdr.dst_port || + tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by syn filter"); + return -rte_errno; + } + + /* check if the next not void item is END */ + index++; + NEXT_ITEM_OF_PATTERN(item, pattern, index); + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by syn filter"); + return -rte_errno; + } + + /* parse action */ + index = 0; + + /* check if the first not void action is QUEUE. */ + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + act_q = (const struct rte_flow_action_queue *)act->conf; + filter->queue = act_q->index; + + /* check if the next not void item is END */ + index++; + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + /* parse attr */ + /* must be input direction */ + if (!attr->ingress) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* not supported */ + if (attr->egress) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "Not support egress."); + return -rte_errno; + } + + /* Support 2 priorities, the lowest or highest. 
*/ + if (!attr->priority) { + filter->hig_pri = 0; + } else if (attr->priority == (uint32_t)~0U) { + filter->hig_pri = 1; + } else { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority."); + return -rte_errno; + } + + return 0; +} + +static int +igb_parse_syn_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_syn_filter *filter, + struct rte_flow_error *error) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + MAC_TYPE_FILTER_SUP(hw->mac.type); + + ret = cons_parse_syn_filter(attr, pattern, + actions, filter, error); + + if (hw->mac.type == e1000_82576) { + if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "queue number not " + "supported by syn filter"); + return -rte_errno; + } + } else { + if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "queue number not " + "supported by syn filter"); + return -rte_errno; + } + } + + if (ret) + return ret; + + return 0; +} + +/** + * Parse the rule to see if it is a flex byte rule. + * And get the flex byte filter info BTW. + * pattern: + * The first not void item must be RAW. + * The second not void item can be RAW or END. + * The third not void item can be RAW or END. + * The last not void item must be END. + * action: + * The first not void action should be QUEUE. + * The next not void action should be END. + * pattern example: + * ITEM Spec Mask + * RAW relative 0 0x1 + * offset 0 0xFFFFFFFF + * pattern {0x08, 0x06} {0xFF, 0xFF} + * RAW relative 1 0x1 + * offset 100 0xFFFFFFFF + * pattern {0x11, 0x22, 0x33} {0xFF, 0xFF, 0xFF} + * END + * other members in mask and spec should set to 0x00. + * item->last should be NULL. 
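+ * A RAW item with relative 0 is matched at 'offset' bytes from the start
+ * of the packet; with relative 1 it is matched at 'offset' bytes after the
+ * end of the previous RAW pattern. At most IGB_FLEX_RAW_NUM RAW items can
+ * be chained, and the total matched length must not exceed
+ * RTE_FLEX_FILTER_MAXLEN.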
+ */ +static int +cons_parse_flex_filter(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_eth_flex_filter *filter, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item; + const struct rte_flow_action *act; + const struct rte_flow_item_raw *raw_spec; + const struct rte_flow_item_raw *raw_mask; + const struct rte_flow_action_queue *act_q; + uint32_t index, i, offset, total_offset; + uint32_t max_offset = 0; + int32_t shift, j, raw_index = 0; + int32_t relative[IGB_FLEX_RAW_NUM] = {0}; + int32_t raw_offset[IGB_FLEX_RAW_NUM] = {0}; + + if (!pattern) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + /* parse pattern */ + index = 0; + +item_loop: + + /* the first not void item should be RAW */ + NEXT_ITEM_OF_PATTERN(item, pattern, index); + if (item->type != RTE_FLOW_ITEM_TYPE_RAW) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by flex filter"); + return -rte_errno; + } + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + item, "Not supported last point for range"); + return -rte_errno; + } + + raw_spec = (const struct rte_flow_item_raw *)item->spec; + raw_mask = (const struct rte_flow_item_raw *)item->mask; + + if (!raw_mask->length || + !raw_mask->relative) { + memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by flex filter"); + return -rte_errno; + } + + if (raw_mask->offset) + offset = raw_spec->offset; + else + offset = 0; + + for (j = 0; j < raw_spec->length; j++) { + if (raw_mask->pattern[j] != 0xFF) { + memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by flex filter"); + return -rte_errno; + } + } + + total_offset = 0; + + if (raw_spec->relative) { + for (j = raw_index; j > 0; j--) { + total_offset += raw_offset[j - 1]; + if (!relative[j - 1]) + break; + } + if (total_offset + raw_spec->length + offset > max_offset) + max_offset = total_offset + raw_spec->length + offset; + } else { + if (raw_spec->length + offset > max_offset) + max_offset = raw_spec->length + offset; + } + + if ((raw_spec->length + offset + total_offset) > + RTE_FLEX_FILTER_MAXLEN) { + memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Not supported by flex filter"); + return -rte_errno; + } + + if (raw_spec->relative == 0) { + for (j = 0; j < raw_spec->length; j++) + filter->bytes[offset + j] = + raw_spec->pattern[j]; + j = offset / CHAR_BIT; + shift = offset % CHAR_BIT; + } else { + for (j = 0; j < raw_spec->length; j++) + filter->bytes[total_offset + offset + j] = + raw_spec->pattern[j]; + j = (total_offset + offset) / CHAR_BIT; + shift = (total_offset + offset) % CHAR_BIT; + } + + i = 0; + + for ( ; shift < CHAR_BIT; shift++) { + filter->mask[j] |= (0x80 >> shift); + i++; + if (i == raw_spec->length) + break; + if (shift == (CHAR_BIT - 1)) { + j++; + shift = -1; + } + } + + relative[raw_index] = 
raw_spec->relative;
+ raw_offset[raw_index] = offset + raw_spec->length;
+ raw_index++;
+
+ /* check if the next not void item is RAW or END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by flex filter");
+ return -rte_errno;
+ }
+
+ /* go back to parser */
+ if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
+ /* if the item is RAW, the content should be parsed */
+ goto item_loop;
+ }
+
+ filter->len = RTE_ALIGN(max_offset, 8);
+
+ /* parse action */
+ index = 0;
+
+ /* check if the first not void action is QUEUE. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -rte_errno;
+ }
+
+ filter->priority = (uint16_t)attr->priority;
+
+ return 0;
+}
+
+static int
+igb_parse_flex_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_flex_filter *filter,
+ struct rte_flow_error *error)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+ ret = cons_parse_flex_filter(attr, pattern,
+ actions, filter, error);
+
+ if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue number not supported by flex filter");
+ return -rte_errno;
+ }
+
+ if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN ||
+ filter->len % sizeof(uint64_t) != 0) {
+ PMD_DRV_LOG(ERR, "filter's length is out of range");
+ return -EINVAL;
+ }
+
+ if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
+ PMD_DRV_LOG(ERR, "filter's priority is out of range");
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * Create a flow rule.
+ * Theoretically one rule can match more than one filter.
+ * We will let it use the filter which it hits first.
+ * So, the sequence matters.
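+ * The parsers below are tried in order: ntuple, ethertype, SYN and
+ * finally flex.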
+ */ +static struct rte_flow * +igb_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + int ret; + struct rte_eth_ntuple_filter ntuple_filter; + struct rte_eth_ethertype_filter ethertype_filter; + struct rte_eth_syn_filter syn_filter; + struct rte_eth_flex_filter flex_filter; + struct rte_flow *flow = NULL; + struct igb_ntuple_filter_ele *ntuple_filter_ptr; + struct igb_ethertype_filter_ele *ethertype_filter_ptr; + struct igb_eth_syn_filter_ele *syn_filter_ptr; + struct igb_flex_filter_ele *flex_filter_ptr; + struct igb_flow_mem *igb_flow_mem_ptr; + + flow = rte_zmalloc("igb_rte_flow", sizeof(struct rte_flow), 0); + if (!flow) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + return (struct rte_flow *)flow; + } + igb_flow_mem_ptr = rte_zmalloc("igb_flow_mem", + sizeof(struct igb_flow_mem), 0); + if (!igb_flow_mem_ptr) { + PMD_DRV_LOG(ERR, "failed to allocate memory"); + rte_free(flow); + return NULL; + } + igb_flow_mem_ptr->flow = flow; + igb_flow_mem_ptr->dev = dev; + TAILQ_INSERT_TAIL(&igb_flow_list, + igb_flow_mem_ptr, entries); + + memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter)); + ret = igb_parse_ntuple_filter(dev, attr, pattern, + actions, &ntuple_filter, error); + if (!ret) { + ret = igb_add_del_ntuple_filter(dev, &ntuple_filter, TRUE); + if (!ret) { + ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter", + sizeof(struct igb_ntuple_filter_ele), 0); + rte_memcpy(&ntuple_filter_ptr->filter_info, + &ntuple_filter, + sizeof(struct rte_eth_ntuple_filter)); + TAILQ_INSERT_TAIL(&igb_filter_ntuple_list, + ntuple_filter_ptr, entries); + flow->rule = ntuple_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_NTUPLE; + return flow; + } + goto out; + } + + memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter)); + ret = igb_parse_ethertype_filter(dev, attr, pattern, + actions, ðertype_filter, error); + if (!ret) { + ret = igb_add_del_ethertype_filter(dev, + ðertype_filter, TRUE); + if (!ret) { + ethertype_filter_ptr = rte_zmalloc( + "igb_ethertype_filter", + sizeof(struct igb_ethertype_filter_ele), 0); + rte_memcpy(ðertype_filter_ptr->filter_info, + ðertype_filter, + sizeof(struct rte_eth_ethertype_filter)); + TAILQ_INSERT_TAIL(&igb_filter_ethertype_list, + ethertype_filter_ptr, entries); + flow->rule = ethertype_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_ETHERTYPE; + return flow; + } + goto out; + } + + memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter)); + ret = igb_parse_syn_filter(dev, attr, pattern, + actions, &syn_filter, error); + if (!ret) { + ret = eth_igb_syn_filter_set(dev, &syn_filter, TRUE); + if (!ret) { + syn_filter_ptr = rte_zmalloc("igb_syn_filter", + sizeof(struct igb_eth_syn_filter_ele), 0); + rte_memcpy(&syn_filter_ptr->filter_info, + &syn_filter, + sizeof(struct rte_eth_syn_filter)); + TAILQ_INSERT_TAIL(&igb_filter_syn_list, + syn_filter_ptr, + entries); + flow->rule = syn_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_SYN; + return flow; + } + goto out; + } + + memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter)); + ret = igb_parse_flex_filter(dev, attr, pattern, + actions, &flex_filter, error); + if (!ret) { + ret = eth_igb_add_del_flex_filter(dev, &flex_filter, TRUE); + if (!ret) { + flex_filter_ptr = rte_zmalloc("igb_flex_filter", + sizeof(struct igb_flex_filter_ele), 0); + rte_memcpy(&flex_filter_ptr->filter_info, + &flex_filter, + sizeof(struct rte_eth_flex_filter)); + 
TAILQ_INSERT_TAIL(&igb_filter_flex_list, + flex_filter_ptr, entries); + flow->rule = flex_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_FLEXIBLE; + return flow; + } + } + +out: + TAILQ_REMOVE(&igb_flow_list, + igb_flow_mem_ptr, entries); + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to create flow."); + rte_free(igb_flow_mem_ptr); + rte_free(flow); + return NULL; +} + /** * Check if the flow rule is supported by igb. * It only checkes the format. Don't guarantee the rule can be programmed into @@ -752,6 +1445,8 @@ igb_flow_validate(__rte_unused struct rte_eth_dev *dev, { struct rte_eth_ntuple_filter ntuple_filter; struct rte_eth_ethertype_filter ethertype_filter; + struct rte_eth_syn_filter syn_filter; + struct rte_eth_flex_filter flex_filter; int ret; memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter)); @@ -766,13 +1461,247 @@ igb_flow_validate(__rte_unused struct rte_eth_dev *dev, if (!ret) return 0; + memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter)); + ret = igb_parse_syn_filter(dev, attr, pattern, + actions, &syn_filter, error); + if (!ret) + return 0; + + memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter)); + ret = igb_parse_flex_filter(dev, attr, pattern, + actions, &flex_filter, error); + + return ret; +} + +/* Destroy a flow rule on igb. */ +static int +igb_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + int ret; + struct rte_flow *pmd_flow = flow; + enum rte_filter_type filter_type = pmd_flow->filter_type; + struct igb_ntuple_filter_ele *ntuple_filter_ptr; + struct igb_ethertype_filter_ele *ethertype_filter_ptr; + struct igb_eth_syn_filter_ele *syn_filter_ptr; + struct igb_flex_filter_ele *flex_filter_ptr; + struct igb_flow_mem *igb_flow_mem_ptr; + + switch (filter_type) { + case RTE_ETH_FILTER_NTUPLE: + ntuple_filter_ptr = (struct igb_ntuple_filter_ele *) + pmd_flow->rule; + ret = igb_add_del_ntuple_filter(dev, + &ntuple_filter_ptr->filter_info, FALSE); + if (!ret) { + TAILQ_REMOVE(&igb_filter_ntuple_list, + ntuple_filter_ptr, entries); + rte_free(ntuple_filter_ptr); + } + break; + case RTE_ETH_FILTER_ETHERTYPE: + ethertype_filter_ptr = (struct igb_ethertype_filter_ele *) + pmd_flow->rule; + ret = igb_add_del_ethertype_filter(dev, + ðertype_filter_ptr->filter_info, FALSE); + if (!ret) { + TAILQ_REMOVE(&igb_filter_ethertype_list, + ethertype_filter_ptr, entries); + rte_free(ethertype_filter_ptr); + } + break; + case RTE_ETH_FILTER_SYN: + syn_filter_ptr = (struct igb_eth_syn_filter_ele *) + pmd_flow->rule; + ret = eth_igb_syn_filter_set(dev, + &syn_filter_ptr->filter_info, FALSE); + if (!ret) { + TAILQ_REMOVE(&igb_filter_syn_list, + syn_filter_ptr, entries); + rte_free(syn_filter_ptr); + } + break; + case RTE_ETH_FILTER_FLEXIBLE: + flex_filter_ptr = (struct igb_flex_filter_ele *) + pmd_flow->rule; + ret = eth_igb_add_del_flex_filter(dev, + &flex_filter_ptr->filter_info, FALSE); + if (!ret) { + TAILQ_REMOVE(&igb_filter_flex_list, + flex_filter_ptr, entries); + rte_free(flex_filter_ptr); + } + break; + default: + PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", + filter_type); + ret = -EINVAL; + break; + } + + if (ret) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to destroy flow"); + return ret; + } + + TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) { + if (igb_flow_mem_ptr->flow == pmd_flow) { + TAILQ_REMOVE(&igb_flow_list, + igb_flow_mem_ptr, entries); + rte_free(igb_flow_mem_ptr); + } + } + rte_free(flow); + 
return ret; } +/* remove all the n-tuple filters */ +static void +igb_clear_all_ntuple_filter(struct rte_eth_dev *dev) +{ + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_5tuple_filter *p_5tuple; + struct e1000_2tuple_filter *p_2tuple; + + while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) + igb_delete_5tuple_filter_82576(dev, p_5tuple); + + while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list))) + igb_delete_2tuple_filter(dev, p_2tuple); +} + +/* remove all the ether type filters */ +static void +igb_clear_all_ethertype_filter(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + int i; + + for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) { + if (filter_info->ethertype_mask & (1 << i)) { + (void)igb_ethertype_filter_remove(filter_info, + (uint8_t)i); + E1000_WRITE_REG(hw, E1000_ETQF(i), 0); + E1000_WRITE_FLUSH(hw); + } + } +} + +/* remove the SYN filter */ +static void +igb_clear_syn_filter(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + + if (filter_info->syn_info & E1000_SYN_FILTER_ENABLE) { + filter_info->syn_info = 0; + E1000_WRITE_REG(hw, E1000_SYNQF(0), 0); + E1000_WRITE_FLUSH(hw); + } +} + +/* remove all the flex filters */ +static void +igb_clear_all_flex_filter(struct rte_eth_dev *dev) +{ + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct e1000_flex_filter *flex_filter; + + while ((flex_filter = TAILQ_FIRST(&filter_info->flex_list))) + igb_remove_flex_filter(dev, flex_filter); +} + +void +igb_filterlist_flush(struct rte_eth_dev *dev) +{ + struct igb_ntuple_filter_ele *ntuple_filter_ptr; + struct igb_ethertype_filter_ele *ethertype_filter_ptr; + struct igb_eth_syn_filter_ele *syn_filter_ptr; + struct igb_flex_filter_ele *flex_filter_ptr; + struct igb_flow_mem *igb_flow_mem_ptr; + enum rte_filter_type filter_type; + struct rte_flow *pmd_flow; + + TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) { + if (igb_flow_mem_ptr->dev == dev) { + pmd_flow = igb_flow_mem_ptr->flow; + filter_type = pmd_flow->filter_type; + + switch (filter_type) { + case RTE_ETH_FILTER_NTUPLE: + ntuple_filter_ptr = + (struct igb_ntuple_filter_ele *) + pmd_flow->rule; + TAILQ_REMOVE(&igb_filter_ntuple_list, + ntuple_filter_ptr, entries); + rte_free(ntuple_filter_ptr); + break; + case RTE_ETH_FILTER_ETHERTYPE: + ethertype_filter_ptr = + (struct igb_ethertype_filter_ele *) + pmd_flow->rule; + TAILQ_REMOVE(&igb_filter_ethertype_list, + ethertype_filter_ptr, entries); + rte_free(ethertype_filter_ptr); + break; + case RTE_ETH_FILTER_SYN: + syn_filter_ptr = + (struct igb_eth_syn_filter_ele *) + pmd_flow->rule; + TAILQ_REMOVE(&igb_filter_syn_list, + syn_filter_ptr, entries); + rte_free(syn_filter_ptr); + break; + case RTE_ETH_FILTER_FLEXIBLE: + flex_filter_ptr = + (struct igb_flex_filter_ele *) + pmd_flow->rule; + TAILQ_REMOVE(&igb_filter_flex_list, + flex_filter_ptr, entries); + rte_free(flex_filter_ptr); + break; + default: + PMD_DRV_LOG(WARNING, "Filter type" + "(%d) not supported", filter_type); + break; + } + TAILQ_REMOVE(&igb_flow_list, + igb_flow_mem_ptr, + entries); + rte_free(igb_flow_mem_ptr->flow); + rte_free(igb_flow_mem_ptr); + } + } +} + +/* Destroy all 
flow rules associated with a port on igb. */ +static int +igb_flow_flush(struct rte_eth_dev *dev, + __rte_unused struct rte_flow_error *error) +{ + igb_clear_all_ntuple_filter(dev); + igb_clear_all_ethertype_filter(dev); + igb_clear_syn_filter(dev); + igb_clear_all_flex_filter(dev); + igb_filterlist_flush(dev); + + return 0; +} + const struct rte_flow_ops igb_flow_ops = { - igb_flow_validate, - NULL, - NULL, - NULL, - NULL, + .validate = igb_flow_validate, + .create = igb_flow_create, + .destroy = igb_flow_destroy, + .flush = igb_flow_flush, };
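For reference, the following is a minimal, illustrative sketch of how an
application could exercise the SYN-filter path added above through the
generic rte_flow API. The helper name, port id and queue index are
placeholders and not part of this patch; note that the parser requires the
TCP flags mask to be exactly the SYN bit (0x02), and the queue must be a
valid RX queue of the port.

#include <stdint.h>
#include <string.h>
#include <rte_flow.h>

static struct rte_flow *
example_create_syn_rule(uint8_t port_id, uint16_t rx_queue)
{
	struct rte_flow_attr attr = { .ingress = 1, .priority = 0 };
	struct rte_flow_item_tcp tcp_spec, tcp_mask;
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_error err;

	memset(&tcp_spec, 0, sizeof(tcp_spec));
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	tcp_spec.hdr.tcp_flags = 0x02;	/* TCP SYN */
	tcp_mask.hdr.tcp_flags = 0x02;	/* parser accepts only the SYN bit in the mask */

	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* spec/mask must stay NULL */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* spec/mask must stay NULL */
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}

The same attr/pattern/actions can be passed to rte_flow_validate() first,
which maps to igb_flow_validate() above and checks the rule without
programming the hardware.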