X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fsoftnic%2Frte_eth_softnic_flow.c;h=f05ff092fafe4da69731848a38446cecc0c68b03;hb=21ecbde99b2f0772d36eef8276a686f3135e5de9;hp=03d41bc01e57810256abac1a5ddcf23e715912e0;hpb=7e30e444c3e44e2a7dc68f2ad6d6efc831e73d63;p=dpdk.git

diff --git a/drivers/net/softnic/rte_eth_softnic_flow.c b/drivers/net/softnic/rte_eth_softnic_flow.c
index 03d41bc01e..f05ff092fa 100644
--- a/drivers/net/softnic/rte_eth_softnic_flow.c
+++ b/drivers/net/softnic/rte_eth_softnic_flow.c
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include <rte_string_fns.h>
 
 #include "rte_eth_softnic_internals.h"
 #include "rte_eth_softnic.h"
@@ -55,7 +56,7 @@ flow_attr_map_set(struct pmd_internals *softnic,
 	map = (ingress) ? &softnic->flow.ingress_map[group_id] :
 		&softnic->flow.egress_map[group_id];
 
-	strcpy(map->pipeline_name, pipeline_name);
+	strlcpy(map->pipeline_name, pipeline_name, sizeof(map->pipeline_name));
 	map->table_id = table_id;
 	map->valid = 1;
 
@@ -1167,6 +1168,8 @@ flow_rule_action_get(struct pmd_internals *softnic,
 	struct softnic_table_action_profile_params *params;
 	int n_jump_queue_rss_drop = 0;
 	int n_count = 0;
+	int n_mark = 0;
+	int n_vxlan_decap = 0;
 
 	profile = softnic_table_action_profile_find(softnic,
 		table->params.action_profile_name);
@@ -1280,7 +1283,8 @@ flow_rule_action_get(struct pmd_internals *softnic,
 					action,
 					"QUEUE: Invalid RX queue ID");
 
-			sprintf(name, "RXQ%u", (uint32_t)conf->index);
+			snprintf(name, sizeof(name), "RXQ%u",
+				(uint32_t)conf->index);
 
 			status = softnic_pipeline_port_out_find(softnic,
 				pipeline->name,
@@ -1370,7 +1374,7 @@ flow_rule_action_get(struct pmd_internals *softnic,
 						action,
 						"RSS: Invalid RX queue ID");
 
-				sprintf(name, "RXQ%u",
+				snprintf(name, sizeof(name), "RXQ%u",
 					(uint32_t)conf->queue[i]);
 
 				status = softnic_pipeline_port_out_find(softnic,
@@ -1474,6 +1478,74 @@ flow_rule_action_get(struct pmd_internals *softnic,
 			break;
 		} /* RTE_FLOW_ACTION_TYPE_COUNT */
 
+		case RTE_FLOW_ACTION_TYPE_MARK:
+		{
+			const struct rte_flow_action_mark *conf = action->conf;
+
+			if (conf == NULL)
+				return rte_flow_error_set(error,
+					EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					action,
+					"MARK: Null configuration");
+
+			if (n_mark)
+				return rte_flow_error_set(error,
+					ENOTSUP,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					action,
+					"Only one MARK action per flow");
+
+			if ((params->action_mask &
+				(1LLU << RTE_TABLE_ACTION_TAG)) == 0)
+				return rte_flow_error_set(error,
+					ENOTSUP,
+					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					NULL,
+					"MARK action not supported by this table");
+
+			n_mark = 1;
+
+			/* RTE_TABLE_ACTION_TAG */
+			rule_action->tag.tag = conf->id;
+			rule_action->action_mask |= 1 << RTE_TABLE_ACTION_TAG;
+			break;
+		} /* RTE_FLOW_ACTION_TYPE_MARK */
+
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+		{
+			const struct rte_flow_action_mark *conf = action->conf;
+
+			if (conf)
+				return rte_flow_error_set(error,
+					EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					action,
+					"VXLAN DECAP: Non-null configuration");
+
+			if (n_vxlan_decap)
+				return rte_flow_error_set(error,
+					ENOTSUP,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					action,
+					"Only one VXLAN DECAP action per flow");
+
+			if ((params->action_mask &
+				(1LLU << RTE_TABLE_ACTION_DECAP)) == 0)
+				return rte_flow_error_set(error,
+					ENOTSUP,
+					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					NULL,
+					"VXLAN DECAP action not supported by this table");
+
+			n_vxlan_decap = 1;
+
+			/* RTE_TABLE_ACTION_DECAP */
+			rule_action->decap.n = 50; /* Ether/IPv4/UDP/VXLAN */
+			rule_action->action_mask |= 1 << RTE_TABLE_ACTION_DECAP;
+			break;
+		} /* RTE_FLOW_ACTION_TYPE_VXLAN_DECAP */
+
 		case RTE_FLOW_ACTION_TYPE_METER:
 		{
 			const struct rte_flow_action_meter *conf = action->conf;
@@ -1552,17 +1624,209 @@ flow_rule_action_get(struct pmd_internals *softnic,
 
 			/* RTE_TABLE_ACTION_METER */
 			rule_action->mtr.mtr[0].meter_profile_id = meter_profile_id;
-			rule_action->mtr.mtr[0].policer[e_RTE_METER_GREEN] =
-				(enum rte_table_action_policer)m->params.action[RTE_MTR_GREEN];
-			rule_action->mtr.mtr[0].policer[e_RTE_METER_YELLOW] =
-				(enum rte_table_action_policer)m->params.action[RTE_MTR_YELLOW];
-			rule_action->mtr.mtr[0].policer[e_RTE_METER_RED] =
-				(enum rte_table_action_policer)m->params.action[RTE_MTR_RED];
+			rule_action->mtr.mtr[0].policer[RTE_COLOR_GREEN] =
+				softnic_table_action_policer(m->params.action[RTE_COLOR_GREEN]);
+			rule_action->mtr.mtr[0].policer[RTE_COLOR_YELLOW] =
+				softnic_table_action_policer(m->params.action[RTE_COLOR_YELLOW]);
+			rule_action->mtr.mtr[0].policer[RTE_COLOR_RED] =
+				softnic_table_action_policer(m->params.action[RTE_COLOR_RED]);
 			rule_action->mtr.tc_mask = 1;
 			rule_action->action_mask |= 1 << RTE_TABLE_ACTION_MTR;
 			break;
 		} /* RTE_FLOW_ACTION_TYPE_METER */
 
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		{
+			const struct rte_flow_action_vxlan_encap *conf =
+				action->conf;
+			const struct rte_flow_item *item;
+			union flow_item spec, mask;
+			int disabled = 0, status;
+			size_t size;
+
+			if (conf == NULL)
+				return rte_flow_error_set(error,
+					EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					action,
+					"VXLAN ENCAP: Null configuration");
+
+			item = conf->definition;
+			if (item == NULL)
+				return rte_flow_error_set(error,
+					EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					action,
+					"VXLAN ENCAP: Null configuration definition");
+
+			if (!(params->action_mask &
+				(1LLU << RTE_TABLE_ACTION_ENCAP)))
+				return rte_flow_error_set(error,
+					EINVAL,
+					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					NULL,
+					"VXLAN ENCAP: Encap action not enabled for this table");
+
+			/* Check for Ether. */
+			flow_item_skip_void(&item);
+			status = flow_item_proto_preprocess(item, &spec, &mask,
+				&size, &disabled, error);
+			if (status)
+				return status;
+
+			if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+				return rte_flow_error_set(error,
+					EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item,
+					"VXLAN ENCAP: first encap item should be ether");
+			}
+			rte_ether_addr_copy(&spec.eth.dst,
+				&rule_action->encap.vxlan.ether.da);
+			rte_ether_addr_copy(&spec.eth.src,
+				&rule_action->encap.vxlan.ether.sa);
+
+			item++;
+
+			/* Check for VLAN. */
+			flow_item_skip_void(&item);
+			status = flow_item_proto_preprocess(item, &spec, &mask,
+				&size, &disabled, error);
+			if (status)
+				return status;
+
+			if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+				if (!params->encap.vxlan.vlan)
+					return rte_flow_error_set(error,
+						ENOTSUP,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"VXLAN ENCAP: vlan encap not supported by table");
+
+				uint16_t tci = rte_ntohs(spec.vlan.tci);
+				rule_action->encap.vxlan.vlan.pcp =
+					tci >> 13;
+				rule_action->encap.vxlan.vlan.dei =
+					(tci >> 12) & 0x1;
+				rule_action->encap.vxlan.vlan.vid =
+					tci & 0xfff;
+
+				item++;
+
+				flow_item_skip_void(&item);
+				status = flow_item_proto_preprocess(item, &spec,
+					&mask, &size, &disabled, error);
+				if (status)
+					return status;
+			} else {
+				if (params->encap.vxlan.vlan)
+					return rte_flow_error_set(error,
+						ENOTSUP,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"VXLAN ENCAP: expecting vlan encap item");
+			}
+
+			/* Check for IPV4/IPV6. */
+			switch (item->type) {
+			case RTE_FLOW_ITEM_TYPE_IPV4:
+			{
+				rule_action->encap.vxlan.ipv4.sa =
+					rte_ntohl(spec.ipv4.hdr.src_addr);
+				rule_action->encap.vxlan.ipv4.da =
+					rte_ntohl(spec.ipv4.hdr.dst_addr);
+				rule_action->encap.vxlan.ipv4.dscp =
+					spec.ipv4.hdr.type_of_service >> 2;
+				rule_action->encap.vxlan.ipv4.ttl =
+					spec.ipv4.hdr.time_to_live;
+				break;
+			}
+			case RTE_FLOW_ITEM_TYPE_IPV6:
+			{
+				uint32_t vtc_flow;
+
+				memcpy(&rule_action->encap.vxlan.ipv6.sa,
+					&spec.ipv6.hdr.src_addr,
+					sizeof(spec.ipv6.hdr.src_addr));
+				memcpy(&rule_action->encap.vxlan.ipv6.da,
+					&spec.ipv6.hdr.dst_addr,
+					sizeof(spec.ipv6.hdr.dst_addr));
+				vtc_flow = rte_ntohl(spec.ipv6.hdr.vtc_flow);
+				rule_action->encap.vxlan.ipv6.flow_label =
+					vtc_flow & 0xfffff;
+				rule_action->encap.vxlan.ipv6.dscp =
+					(vtc_flow >> 22) & 0x3f;
+				rule_action->encap.vxlan.ipv6.hop_limit =
+					spec.ipv6.hdr.hop_limits;
+				break;
+			}
+			default:
+				return rte_flow_error_set(error,
+					EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item,
+					"VXLAN ENCAP: encap item after ether should be ipv4/ipv6");
+			}
+
+			item++;
+
+			/* Check for UDP. */
+			flow_item_skip_void(&item);
+			status = flow_item_proto_preprocess(item, &spec, &mask,
+				&size, &disabled, error);
+			if (status)
+				return status;
+
+			if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
+				return rte_flow_error_set(error,
+					EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item,
+					"VXLAN ENCAP: encap item after ipv4/ipv6 should be udp");
+			}
+			rule_action->encap.vxlan.udp.sp =
+				rte_ntohs(spec.udp.hdr.src_port);
+			rule_action->encap.vxlan.udp.dp =
+				rte_ntohs(spec.udp.hdr.dst_port);
+
+			item++;
+
+			/* Check for VXLAN. */
+			flow_item_skip_void(&item);
+			status = flow_item_proto_preprocess(item, &spec, &mask,
+				&size, &disabled, error);
+			if (status)
+				return status;
+
+			if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
+				return rte_flow_error_set(error,
+					EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item,
+					"VXLAN ENCAP: encap item after udp should be vxlan");
+			}
+			rule_action->encap.vxlan.vxlan.vni =
+				(spec.vxlan.vni[0] << 16U |
+				spec.vxlan.vni[1] << 8U |
+				spec.vxlan.vni[2]);
+
+			item++;
+
+			/* Check for END. */
+			flow_item_skip_void(&item);
+
+			if (item->type != RTE_FLOW_ITEM_TYPE_END)
+				return rte_flow_error_set(error,
+					EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item,
+					"VXLAN ENCAP: expecting END item");
+
+			rule_action->encap.type = RTE_TABLE_ACTION_ENCAP_VXLAN;
+			rule_action->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+			break;
+		} /* RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP */
+
 		default:
 			return -ENOTSUP;
 		}
@@ -1915,6 +2179,53 @@ pmd_flow_destroy(struct rte_eth_dev *dev,
 	return 0;
 }
 
+static int
+pmd_flow_flush(struct rte_eth_dev *dev,
+	struct rte_flow_error *error)
+{
+	struct pmd_internals *softnic = dev->data->dev_private;
+	struct pipeline *pipeline;
+	int fail_to_del_rule = 0;
+	uint32_t i;
+
+	TAILQ_FOREACH(pipeline, &softnic->pipeline_list, node) {
+		/* Remove all the flows added to the tables. */
+		for (i = 0; i < pipeline->n_tables; i++) {
+			struct softnic_table *table = &pipeline->table[i];
+			struct rte_flow *flow;
+			void *temp;
+			int status;
+
+			TAILQ_FOREACH_SAFE(flow, &table->flows, node, temp) {
+				/* Rule delete. */
+				status = softnic_pipeline_table_rule_delete
+						(softnic,
+						pipeline->name,
+						i,
+						&flow->match);
+				if (status)
+					fail_to_del_rule = 1;
+				/* Update dependencies */
+				if (is_meter_action_enable(softnic, table))
+					flow_meter_owner_reset(softnic, flow);
+
+				/* Flow delete. */
+				TAILQ_REMOVE(&table->flows, flow, node);
+				free(flow);
+			}
+		}
+	}
+
+	if (fail_to_del_rule)
+		return rte_flow_error_set(error,
+			EINVAL,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			NULL,
+			"Some of the rules could not be deleted");
+
+	return 0;
+}
+
 static int
 pmd_flow_query(struct rte_eth_dev *dev __rte_unused,
 	struct rte_flow *flow,
@@ -1971,7 +2282,7 @@ const struct rte_flow_ops pmd_flow_ops = {
 	.validate = pmd_flow_validate,
 	.create = pmd_flow_create,
 	.destroy = pmd_flow_destroy,
-	.flush = NULL,
+	.flush = pmd_flow_flush,
 	.query = pmd_flow_query,
 	.isolate = NULL,
 };
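
For illustration (not part of the patch above): a minimal sketch of how an application might exercise the MARK and VXLAN_DECAP actions and the new flush callback through the public rte_flow API. The port_id, mark value and queue index are placeholder assumptions, and the Soft NIC pipeline, table and table action profile are assumed to have been configured beforehand (e.g. via the Soft NIC firmware CLI) with the TAG, DECAP and FWD table actions enabled.

#include <rte_ethdev.h>
#include <rte_flow.h>

/* Illustrative only: create one ingress flow that uses the MARK and
 * VXLAN_DECAP actions handled above, then remove every flow on the port
 * through rte_flow_flush(), which now dispatches to pmd_flow_flush().
 */
static int
softnic_flow_example(uint16_t port_id)
{
	struct rte_flow_error error;
	struct rte_flow_attr attr = { .ingress = 1, .group = 0 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_mark mark = { .id = 0x1234 };  /* placeholder */
	struct rte_flow_action_queue queue = { .index = 0 };  /* placeholder */
	struct rte_flow_action actions[] = {
		/* Maps to RTE_TABLE_ACTION_TAG in the table action profile. */
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		/* Takes no configuration; conf must stay NULL (see above). */
		{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP, .conf = NULL },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow *flow;

	flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
	if (flow == NULL)
		return -1;

	/* Remove all flows installed on this port. */
	return rte_flow_flush(port_id, &error);
}

As in pmd_flow_flush() above, the flush call removes every flow on the port and reports a single EINVAL error if any table rule could not be deleted.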