X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fe1000%2Figb_flow.c;h=43fef889b5f4b540fbc3ab3943714c7d3632c0d7;hb=5fc66630bed5db8b0e2507e7324f1c8f98e0dd9a;hp=c0f5b5190ab4bb942226d062f1ff2199aef81870;hpb=f665846a010f35f48d03271dca60883166637bb9;p=dpdk.git

diff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c
index c0f5b5190a..43fef889b5 100644
--- a/drivers/net/e1000/igb_flow.c
+++ b/drivers/net/e1000/igb_flow.c
@@ -49,6 +49,13 @@
 
 #define IGB_FLEX_RAW_NUM	12
 
+struct igb_flow_mem_list igb_flow_list;
+struct igb_ntuple_filter_list igb_filter_ntuple_list;
+struct igb_ethertype_filter_list igb_filter_ethertype_list;
+struct igb_syn_filter_list igb_filter_syn_list;
+struct igb_flex_filter_list igb_filter_flex_list;
+struct igb_rss_filter_list igb_filter_rss_list;
+
 /**
  * Please aware there's an asumption for all the parsers.
  * rte_flow_item is using big endian, rte_flow_attr and
@@ -379,6 +386,15 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
+	/* not supported */
+	if (attr->transfer) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+				attr, "No support for transfer.");
+		return -rte_errno;
+	}
+
 	if (attr->priority > 0xFFFF) {
 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
@@ -539,9 +555,9 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
 	 * Mask bits of destination MAC address must be full
 	 * of 1 or full of 0.
 	 */
-	if (!is_zero_ether_addr(&eth_mask->src) ||
-	    (!is_zero_ether_addr(&eth_mask->dst) &&
-	     !is_broadcast_ether_addr(&eth_mask->dst))) {
+	if (!rte_is_zero_ether_addr(&eth_mask->src) ||
+	    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
+	     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
 		rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM,
 				item, "Invalid ether address mask");
@@ -558,7 +574,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
 	/* If mask bits of destination MAC address
 	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
 	 */
-	if (is_broadcast_ether_addr(&eth_mask->dst)) {
+	if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
 		filter->mac_addr = eth_spec->dst;
 		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
 	} else {
@@ -623,6 +639,14 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
+	/* Not supported */
+	if (attr->transfer) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+				attr, "No support for transfer.");
+		return -rte_errno;
+	}
+
 	/* Not supported */
 	if (attr->priority) {
 		rte_flow_error_set(error, EINVAL,
@@ -683,8 +707,8 @@ igb_parse_ethertype_filter(struct rte_eth_dev *dev,
 		}
 	}
 
-	if (filter->ether_type == ETHER_TYPE_IPv4 ||
-		filter->ether_type == ETHER_TYPE_IPv6) {
+	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+		filter->ether_type == RTE_ETHER_TYPE_IPV6) {
 		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ITEM,
@@ -849,13 +873,13 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
 	tcp_spec = item->spec;
 	tcp_mask = item->mask;
 
-	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
+	if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
 	    tcp_mask->hdr.src_port ||
 	    tcp_mask->hdr.dst_port ||
 	    tcp_mask->hdr.sent_seq ||
 	    tcp_mask->hdr.recv_ack ||
 	    tcp_mask->hdr.data_off ||
-	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
+	    tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
 	    tcp_mask->hdr.rx_win ||
 	    tcp_mask->hdr.cksum ||
 	    tcp_mask->hdr.tcp_urp) {
@@ -923,6 +947,15 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
+	/* not supported */
+	if (attr->transfer) {
+		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+				attr, "No support for transfer.");
+		return -rte_errno;
+	}
+
 	/* Support 2 priorities, the lowest or highest.
 	 */
 	if (!attr->priority) {
 		filter->hig_pri = 0;
@@ -1211,6 +1244,15 @@ item_loop:
 		return -rte_errno;
 	}
 
+	/* not supported */
+	if (attr->transfer) {
+		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+				attr, "No support for transfer.");
+		return -rte_errno;
+	}
+
 	if (attr->priority > 0xFFFF) {
 		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
 		rte_flow_error_set(error, EINVAL,
@@ -1272,6 +1314,7 @@ igb_parse_rss_filter(struct rte_eth_dev *dev,
 			struct igb_rte_flow_rss_conf *rss_conf,
 			struct rte_flow_error *error)
 {
+	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	const struct rte_flow_action *act;
 	const struct rte_flow_action_rss *rss;
 	uint16_t n, index;
@@ -1292,7 +1335,7 @@ igb_parse_rss_filter(struct rte_eth_dev *dev,
 
 	rss = (const struct rte_flow_action_rss *)act->conf;
 
-	if (!rss || !rss->num) {
+	if (!rss || !rss->queue_num) {
 		rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ACTION,
 				act,
@@ -1300,7 +1343,7 @@ igb_parse_rss_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	for (n = 0; n < rss->num; n++) {
+	for (n = 0; n < rss->queue_num; n++) {
 		if (rss->queue[n] >= dev->data->nb_rx_queues) {
 			rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1310,20 +1353,35 @@ igb_parse_rss_filter(struct rte_eth_dev *dev,
 		}
 	}
 
-	if (rss->rss_conf)
-		rss_conf->rss_conf = *rss->rss_conf;
-	else
-		rss_conf->rss_conf.rss_hf = IGB_RSS_OFFLOAD_ALL;
-
-	for (n = 0; n < rss->num; ++n)
-		rss_conf->queue[n] = rss->queue[n];
-	rss_conf->num = rss->num;
+	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "non-default RSS hash functions are not supported");
+	if (rss->level)
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "a nonzero RSS encapsulation level is not supported");
+	if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "RSS hash key must be exactly 40 bytes");
+	if (((hw->mac.type == e1000_82576) &&
+	     (rss->queue_num > IGB_MAX_RX_QUEUE_NUM_82576)) ||
+	    ((hw->mac.type != e1000_82576) &&
+	     (rss->queue_num > IGB_MAX_RX_QUEUE_NUM)))
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "too many queues for RSS context");
+	if (igb_rss_conf_init(dev, rss_conf, rss))
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "RSS context initialization failure");
 
 	/* check if the next not void item is END */
 	index++;
 	NEXT_ITEM_OF_ACTION(act, actions, index);
 	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
-		memset(rss_conf, 0, sizeof(struct rte_eth_rss_conf));
+		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
 		rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ACTION,
 				act, "Not supported action.");
@@ -1349,6 +1407,15 @@ igb_parse_rss_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
+	/* not supported */
+	if (attr->transfer) {
+		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+				attr, "No support for transfer.");
+		return -rte_errno;
+	}
+
 	if (attr->priority > 0xFFFF) {
 		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
 		rte_flow_error_set(error, EINVAL,
@@ -1518,9 +1585,8 @@ igb_flow_create(struct rte_eth_dev *dev,
 				PMD_DRV_LOG(ERR, "failed to allocate memory");
 				goto out;
 			}
-			rte_memcpy(&rss_filter_ptr->filter_info,
-				&rss_conf,
-				sizeof(struct igb_rte_flow_rss_conf));
+			igb_rss_conf_init(dev, &rss_filter_ptr->filter_info,
+					&rss_conf.conf);
 			TAILQ_INSERT_TAIL(&igb_filter_rss_list,
 				rss_filter_ptr, entries);
 			flow->rule = rss_filter_ptr;
@@ -1757,7 +1823,7 @@ igb_clear_rss_filter(struct rte_eth_dev *dev)
 	struct e1000_filter_info *filter =
 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
 
-	if (filter->rss_info.num)
+	if (filter->rss_info.conf.queue_num)
 		igb_config_rss_filter(dev, &filter->rss_info, FALSE);
 }
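
For context on the RSS hunks above: DPDK 18.05 reworked struct rte_flow_action_rss, replacing the old num/rss_conf pair with queue_num, queue, func, level, types, key and key_len, which is what igb_parse_rss_filter() now validates. The following is a minimal sketch of how an application could build an RSS rule that passes those checks; the helper name setup_igb_rss_flow(), the two-queue selection and the empty pattern are illustrative assumptions, not part of this patch.

/*
 * Sketch: create an RSS flow rule with the post-18.05 action layout.
 * Assumes the port is an igb device with at least two Rx queues started.
 */
#include <rte_ethdev.h>
#include <rte_flow.h>

static struct rte_flow *
setup_igb_rss_flow(uint16_t port_id, struct rte_flow_error *err)
{
	static const uint16_t queues[] = { 0, 1 };
	const struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,	/* non-default funcs are rejected */
		.level = 0,				/* nonzero levels are rejected */
		.types = ETH_RSS_IP,			/* replaces the old rss_conf.rss_hf */
		.key_len = 0,				/* 0 keeps the default key; else exactly 40 bytes */
		.key = NULL,
		.queue_num = RTE_DIM(queues),		/* replaces the old rss->num */
		.queue = queues,
	};
	const struct rte_flow_attr attr = { .ingress = 1 };	/* .transfer must stay 0 */
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}

The pattern carries no items because this driver's RSS path only inspects the attributes and the action, much like testpmd's "flow create 0 ingress pattern end actions rss queues 0 1 end / end".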