const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
+static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+static int i40e_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
+static int i40e_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
const struct rte_flow_item *pattern,
const struct rte_flow_action actions[],
struct rte_flow_error *error,
union i40e_filter_t *filter);
+static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
+static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *filter);
+static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
+static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
+static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
+ .create = i40e_flow_create,
+ .destroy = i40e_flow_destroy,
+ .flush = i40e_flow_flush,
};
union i40e_filter_t cons_filter;
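+/* Filter type recorded by the parse functions during validation and
+ * consumed by i40e_flow_create().
+ */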
+enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
if (ret)
return ret;
+ cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
+
return ret;
}
if (ret)
return ret;
+ cons_filter_type = RTE_ETH_FILTER_FDIR;
+
if (dev->data->dev_conf.fdir_conf.mode !=
RTE_FDIR_MODE_PERFECT) {
rte_flow_error_set(error, ENOTSUP,
bool is_vni_masked = 0;
enum rte_flow_item_type item_type;
bool vxlan_flag = 0;
+ uint32_t tenant_id_be = 0;
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
if (item->last) {
}
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
+ filter->ip_type = RTE_TUNNEL_IPTYPE_IPV4;
+ /* IPv4 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 item");
+ return -rte_errno;
+ }
+ break;
case RTE_FLOW_ITEM_TYPE_IPV6:
+ filter->ip_type = RTE_TUNNEL_IPTYPE_IPV6;
+ /* IPv6 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 item");
+ return -rte_errno;
+ }
+ break;
case RTE_FLOW_ITEM_TYPE_UDP:
- /* IPv4/IPv6/UDP are used to describe protocol,
- * spec amd mask should be NULL.
+ /* UDP is used to describe protocol,
+ * spec and mask should be NULL.
*/
if (item->spec || item->mask) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
- "Invalid IPv4 item");
+ "Invalid UDP item");
return -rte_errno;
}
break;
& I40E_TCI_MASK;
if (vxlan_spec && vxlan_mask && !is_vni_masked) {
/* If there's vxlan */
- rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
- RTE_DIM(vxlan_spec->vni));
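+ /* VNI is 24-bit: place it in bytes 1..3 of the
+ * big-endian word so the CPU-order value holds
+ * the VNI in its low 24 bits.
+ */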
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ vxlan_spec->vni, 3);
+ filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
if (!o_eth_spec && !o_eth_mask &&
i_eth_spec && i_eth_mask)
filter->filter_type =
/* If there's no inner vlan */
if (vxlan_spec && vxlan_mask && !is_vni_masked) {
/* If there's vxlan */
- rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
- RTE_DIM(vxlan_spec->vni));
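+ /* Same 24-bit VNI extraction as above. */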
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ vxlan_spec->vni, 3);
+ filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
if (!o_eth_spec && !o_eth_mask &&
i_eth_spec && i_eth_mask)
filter->filter_type =
if (ret)
return ret;
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
return ret;
}
return ret;
}
+
+static struct rte_flow *
+i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_flow *flow;
+ int ret;
+
+ flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory");
+ return NULL;
+ }
+
+ ret = i40e_flow_validate(dev, attr, pattern, actions, error);
+ if (ret < 0) {
+ rte_free(flow);
+ return NULL;
+ }
+
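+ /* Program the filter parsed during validation into HW; the
+ * SW rule just appended at the tail of the corresponding list
+ * becomes this flow's rule.
+ */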
+ switch (cons_filter_type) {
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_ethertype_filter_set(pf,
+ &cons_filter.ethertype_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
+ i40e_ethertype_filter_list);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_add_del_fdir_filter(dev,
+ &cons_filter.fdir_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
+ i40e_fdir_filter_list);
+ break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_dev_tunnel_filter_set(pf,
+ &cons_filter.tunnel_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
+ i40e_tunnel_filter_list);
+ break;
+ default:
+ ret = -EINVAL;
+ goto free_flow;
+ }
+
+ flow->filter_type = cons_filter_type;
+ TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
+ return flow;
+
+free_flow:
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(flow);
+ return NULL;
+}
+
+static int
+i40e_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ enum rte_filter_type filter_type = flow->filter_type;
+ int ret = 0;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_flow_destroy_ethertype_filter(pf,
+ (struct i40e_ethertype_filter *)flow->rule);
+ break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_flow_destroy_tunnel_filter(pf,
+ (struct i40e_tunnel_filter *)flow->rule);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_add_del_fdir_filter(dev,
+ &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ } else {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to destroy flow.");
+ }
+
+ return ret;
+}
+
+static int
+i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_ethertype_filter *node;
+ struct i40e_control_filter_stats stats;
+ uint16_t flags = 0;
+ int ret = 0;
+
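+ /* Rebuild the flags used when the filter was added; the AQ
+ * call below passes 0 for the add flag to remove the HW filter.
+ */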
+ if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ ret = i40e_aq_add_rem_control_packet_filter(hw,
+ filter->input.mac_addr.addr_bytes,
+ filter->input.ether_type,
+ flags, pf->main_vsi->seid,
+ filter->queue, 0, &stats, NULL);
+ if (ret < 0)
+ return ret;
+
+ node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
+ if (!node)
+ return -EINVAL;
+
+ ret = i40e_sw_ethertype_filter_del(pf, &node->input);
+
+ return ret;
+}
+
+static int
+i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *filter)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *node;
+ int ret = 0;
+
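+ /* Rebuild the cloud filter element from the stored input so
+ * firmware can match the entry being removed.
+ */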
+ memset(&cld_filter, 0, sizeof(cld_filter));
+ ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
+ (struct ether_addr *)&cld_filter.outer_mac);
+ ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
+ (struct ether_addr *)&cld_filter.inner_mac);
+ cld_filter.inner_vlan = filter->input.inner_vlan;
+ cld_filter.flags = filter->input.flags;
+ cld_filter.tenant_id = filter->input.tenant_id;
+ cld_filter.queue_number = filter->queue;
+
+ ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+ &cld_filter, 1);
+ if (ret < 0)
+ return ret;
+
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
+ if (!node)
+ return -EINVAL;
+
+ ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+
+ return ret;
+}
+
+static int
+i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret;
+
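+ /* Flush flow director, ethertype and tunnel filters in turn;
+ * stop at the first failure.
+ */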
+ ret = i40e_flow_flush_fdir_filter(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush FDIR flows.");
+ return -rte_errno;
+ }
+
+ ret = i40e_flow_flush_ethertype_filter(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to ethertype flush flows.");
+ return -rte_errno;
+ }
+
+ ret = i40e_flow_flush_tunnel_filter(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush tunnel flows.");
+ return -rte_errno;
+ }
+
+ return ret;
+}
+
+static int
+i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
+{
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *fdir_filter;
+ struct rte_flow *flow;
+ void *temp;
+ int ret;
+
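+ /* Clear all flow director filters in HW before dropping the
+ * SW copies.
+ */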
+ ret = i40e_fdir_flush(dev);
+ if (!ret) {
+ /* Delete FDIR filters in FDIR list. */
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ ret = i40e_sw_fdir_filter_del(pf,
+ &fdir_filter->fdir.input);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Delete FDIR flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+ }
+
+ return ret;
+}
+
+/* Flush all ethertype filters */
+static int
+i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
+{
+ struct i40e_ethertype_filter_list
+ *ethertype_list = &pf->ethertype.ethertype_list;
+ struct i40e_ethertype_filter *filter;
+ struct rte_flow *flow;
+ void *temp;
+ int ret = 0;
+
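+ /* Each destroy call removes both the HW entry and its SW list
+ * node.
+ */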
+ while ((filter = TAILQ_FIRST(ethertype_list))) {
+ ret = i40e_flow_destroy_ethertype_filter(pf, filter);
+ if (ret)
+ return ret;
+ }
+
+ /* Delete ethertype flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}
+
+/* Flush all tunnel filters */
+static int
+i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
+{
+ struct i40e_tunnel_filter_list
+ *tunnel_list = &pf->tunnel.tunnel_list;
+ struct i40e_tunnel_filter *filter;
+ struct rte_flow *flow;
+ void *temp;
+ int ret = 0;
+
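+ /* Destroying a tunnel filter removes the HW entry and its SW
+ * list node.
+ */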
+ while ((filter = TAILQ_FIRST(tunnel_list))) {
+ ret = i40e_flow_destroy_tunnel_filter(pf, filter);
+ if (ret)
+ return ret;
+ }
+
+ /* Delete tunnel flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}