* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
const uint8_t *mask,
const uint8_t *nic_mask,
#define MLX5_FLOW_LAYER_GRE (1u << 14)
#define MLX5_FLOW_LAYER_MPLS (1u << 15)
+/* General pattern items bits. */
+#define MLX5_FLOW_ITEM_METADATA (1u << 16)
+
/* Outer Masks. */
#define MLX5_FLOW_LAYER_OUTER_L3 \
(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
int mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
const struct rte_flow_attr *attributes,
struct rte_flow_error *error);
+int mlx5_flow_item_acceptable(const struct rte_flow_item *item,
+ const uint8_t *mask,
+ const uint8_t *nic_mask,
+ unsigned int size,
+ struct rte_flow_error *error);
int mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
uint64_t item_flags,
struct rte_flow_error *error);
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+/**
+ * Validate META item.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] item
+ * Item specification.
+ * @param[in] attr
+ * Attributes of flow that includes this item.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_meta(struct rte_eth_dev *dev,
+ const struct rte_flow_item *item,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_meta *spec = item->spec;
+ const struct rte_flow_item_meta *mask = item->mask;
+ const struct rte_flow_item_meta nic_mask = {
+ .data = RTE_BE32(UINT32_MAX)
+ };
+ int ret;
+ uint64_t offloads = dev->data->dev_conf.txmode.offloads;
+
+ if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
+ return rte_flow_error_set(error, EPERM,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "match on metadata offload "
+ "configuration is off for this port");
+ if (!spec)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ item->spec,
+ "data cannot be empty");
+ if (!spec->data)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ NULL,
+ "data cannot be zero");
+ if (!mask)
+ mask = &rte_flow_item_meta_mask;
+ ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_meta),
+ error);
+ if (ret < 0)
+ return ret;
+ if (attr->ingress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL,
+ "pattern not supported for ingress");
+ return 0;
+}
+
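For reference, a minimal sketch of how an application might exercise this validation path by creating an egress rule that matches on metadata; port_id and the DROP action are illustrative assumptions, not part of this patch:

	/* Illustrative only: match metadata 0xcafe on egress (port_id is assumed). */
	struct rte_flow_attr attr = { .egress = 1 };
	struct rte_flow_item_meta meta_spec = { .data = RTE_BE32(0xcafe) };
	struct rte_flow_item_meta meta_mask = { .data = RTE_BE32(UINT32_MAX) };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_META,
		  .spec = &meta_spec, .mask = &meta_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error flow_err;
	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
						actions, &flow_err);
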
/**
* Verify the @p attributes will be correctly understood by the NIC and store
* them in the @p flow if everything is correct.
return ret;
item_flags |= MLX5_FLOW_LAYER_MPLS;
break;
+ case RTE_FLOW_ITEM_TYPE_META:
+ ret = flow_dv_validate_item_meta(dev, items, attr,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_ITEM_METADATA;
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
vni_v[i] = vni_m[i] & vxlan_v->vni[i];
}
+/**
+ * Add META item to matcher.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ */
+static void
+flow_dv_translate_item_meta(void *matcher, void *key,
+ const struct rte_flow_item *item)
+{
+ const struct rte_flow_item_meta *meta_m;
+ const struct rte_flow_item_meta *meta_v;
+ void *misc2_m =
+ MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
+ void *misc2_v =
+ MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
+
+ meta_m = (const void *)item->mask;
+ if (!meta_m)
+ meta_m = &rte_flow_item_meta_mask;
+ meta_v = (const void *)item->spec;
+ if (meta_v) {
+ MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
+ rte_be_to_cpu_32(meta_m->data));
+ MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
+ rte_be_to_cpu_32(meta_v->data & meta_m->data));
+ }
+}
+
/**
* Update the matcher and the value based the selected item.
*
flow_dv_translate_item_vxlan(tmatcher->mask.buf, key, item,
inner);
break;
+ case RTE_FLOW_ITEM_TYPE_META:
+ flow_dv_translate_item_meta(tmatcher->mask.buf, key, item);
+ break;
default:
break;
}
uint8_t cs_flags;
uint8_t rsvd1;
uint16_t mss;
- uint32_t rsvd2;
+ uint32_t flow_table_metadata;
uint16_t inline_hdr_sz;
uint8_t inline_hdr[2];
} __rte_aligned(MLX5_WQE_DWORD_SIZE);
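On the application side, the value copied into flow_table_metadata comes from the mbuf's tx_metadata field together with the PKT_TX_METADATA flag; a minimal sketch, assuming "m" is an already allocated and filled mbuf:

	/* Illustrative only: tag the packet so egress rules can match on it. */
	m->tx_metadata = RTE_BE32(0xcafe);
	m->ol_flags |= PKT_TX_METADATA;
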
uint8_t tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG);
uint32_t swp_offsets = 0;
uint8_t swp_types = 0;
+ rte_be32_t metadata;
uint16_t tso_segsz = 0;
#ifdef MLX5_PMD_SOFT_COUNTERS
uint32_t total_length = 0;
cs_flags = txq_ol_cksum_to_cs(buf);
txq_mbuf_to_swp(txq, buf, (uint8_t *)&swp_offsets, &swp_types);
raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
+ /* Copy metadata from mbuf if valid. */
+ metadata = buf->ol_flags & PKT_TX_METADATA ? buf->tx_metadata :
+ 0;
/* Replace the Ethernet type by the VLAN if necessary. */
if (buf->ol_flags & PKT_TX_VLAN_PKT) {
uint32_t vlan = rte_cpu_to_be_32(0x81000000 |
swp_offsets,
cs_flags | (swp_types << 8) |
(rte_cpu_to_be_16(tso_segsz) << 16),
- 0,
+ metadata,
(ehdr << 16) | rte_cpu_to_be_16(tso_header_sz),
};
} else {
wqe->eseg = (rte_v128u32_t){
swp_offsets,
cs_flags | (swp_types << 8),
- 0,
+ metadata,
(ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz),
};
}
mpw->wqe->eseg.inline_hdr_sz = 0;
mpw->wqe->eseg.rsvd0 = 0;
mpw->wqe->eseg.rsvd1 = 0;
- mpw->wqe->eseg.rsvd2 = 0;
+ mpw->wqe->eseg.flow_table_metadata = 0;
mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
(txq->wqe_ci << 8) |
MLX5_OPCODE_TSO);
uint32_t length;
unsigned int segs_n = buf->nb_segs;
uint32_t cs_flags;
+ rte_be32_t metadata;
/*
* Make sure there is enough room to store this packet and
max_elts -= segs_n;
--pkts_n;
cs_flags = txq_ol_cksum_to_cs(buf);
+ /* Copy metadata from mbuf if valid. */
+ metadata = buf->ol_flags & PKT_TX_METADATA ? buf->tx_metadata :
+ 0;
/* Retrieve packet information. */
length = PKT_LEN(buf);
assert(length);
if ((mpw.state == MLX5_MPW_STATE_OPENED) &&
((mpw.len != length) ||
(segs_n != 1) ||
+ (mpw.wqe->eseg.flow_table_metadata != metadata) ||
(mpw.wqe->eseg.cs_flags != cs_flags)))
mlx5_mpw_close(txq, &mpw);
if (mpw.state == MLX5_MPW_STATE_CLOSED) {
max_wqe -= 2;
mlx5_mpw_new(txq, &mpw, length);
mpw.wqe->eseg.cs_flags = cs_flags;
+ mpw.wqe->eseg.flow_table_metadata = metadata;
}
/* Multi-segment packets must be alone in their MPW. */
assert((segs_n == 1) || (mpw.pkts_n == 0));
mpw->wqe->eseg.cs_flags = 0;
mpw->wqe->eseg.rsvd0 = 0;
mpw->wqe->eseg.rsvd1 = 0;
- mpw->wqe->eseg.rsvd2 = 0;
+ mpw->wqe->eseg.flow_table_metadata = 0;
inl = (struct mlx5_wqe_inl_small *)
(((uintptr_t)mpw->wqe) + 2 * MLX5_WQE_DWORD_SIZE);
mpw->data.raw = (uint8_t *)&inl->raw;
uint32_t length;
unsigned int segs_n = buf->nb_segs;
uint8_t cs_flags;
+ rte_be32_t metadata;
/*
* Make sure there is enough room to store this packet and
*/
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
cs_flags = txq_ol_cksum_to_cs(buf);
+ /* Copy metadata from mbuf if valid. */
+ metadata = buf->ol_flags & PKT_TX_METADATA ? buf->tx_metadata :
+ 0;
/* Retrieve packet information. */
length = PKT_LEN(buf);
/* Start new session if packet differs. */
if (mpw.state == MLX5_MPW_STATE_OPENED) {
if ((mpw.len != length) ||
(segs_n != 1) ||
+ (mpw.wqe->eseg.flow_table_metadata != metadata) ||
(mpw.wqe->eseg.cs_flags != cs_flags))
mlx5_mpw_close(txq, &mpw);
} else if (mpw.state == MLX5_MPW_INL_STATE_OPENED) {
if ((mpw.len != length) ||
(segs_n != 1) ||
(length > inline_room) ||
+ (mpw.wqe->eseg.flow_table_metadata != metadata) ||
(mpw.wqe->eseg.cs_flags != cs_flags)) {
mlx5_mpw_inline_close(txq, &mpw);
inline_room =
max_wqe -= 2;
mlx5_mpw_new(txq, &mpw, length);
mpw.wqe->eseg.cs_flags = cs_flags;
+ mpw.wqe->eseg.flow_table_metadata = metadata;
} else {
if (unlikely(max_wqe < wqe_inl_n))
break;
max_wqe -= wqe_inl_n;
mlx5_mpw_inline_new(txq, &mpw, length);
mpw.wqe->eseg.cs_flags = cs_flags;
+ mpw.wqe->eseg.flow_table_metadata = metadata;
}
}
/* Multi-segment packets must be alone in their MPW. */
unsigned int do_inline = 0; /* Whether inline is possible. */
uint32_t length;
uint8_t cs_flags;
+ rte_be32_t metadata;
/* Multi-segmented packet is handled in slow-path outside. */
assert(NB_SEGS(buf) == 1);
if (max_elts - j == 0)
break;
cs_flags = txq_ol_cksum_to_cs(buf);
+ /* Copy metadata from mbuf if valid. */
+ metadata = buf->ol_flags & PKT_TX_METADATA ? buf->tx_metadata :
+ 0;
/* Retrieve packet information. */
length = PKT_LEN(buf);
/* Start new session if:
(length <= txq->inline_max_packet_sz &&
inl_pad + sizeof(inl_hdr) + length >
mpw_room) ||
+ (mpw.wqe->eseg.flow_table_metadata != metadata) ||
(mpw.wqe->eseg.cs_flags != cs_flags))
max_wqe -= mlx5_empw_close(txq, &mpw);
}
sizeof(inl_hdr) + length <= mpw_room &&
!txq->mpw_hdr_dseg;
mpw.wqe->eseg.cs_flags = cs_flags;
+ mpw.wqe->eseg.flow_table_metadata = metadata;
} else {
/* Evaluate whether the next packet can be inlined.
* Inlininig is possible when:
#endif
/**
- * Count the number of packets having same ol_flags and calculate cs_flags.
+ * Count the number of packets having same ol_flags and same metadata (if
+ * PKT_TX_METADATA is set in ol_flags), and calculate cs_flags.
*
* @param pkts
* Pointer to array of packets.
* Number of packets.
* @param cs_flags
* Pointer of flags to be returned.
+ * @param metadata
+ * Pointer of metadata to be returned.
+ * @param txq_offloads
+ * Offloads enabled on Tx queue.
*
* @return
- * Number of packets having same ol_flags.
+ * Number of packets having same ol_flags and metadata, if relevant.
*/
static inline unsigned int
-txq_calc_offload(struct rte_mbuf **pkts, uint16_t pkts_n, uint8_t *cs_flags)
+txq_calc_offload(struct rte_mbuf **pkts, uint16_t pkts_n, uint8_t *cs_flags,
+ rte_be32_t *metadata, const uint64_t txq_offloads)
{
unsigned int pos;
- const uint64_t ol_mask =
+ const uint64_t cksum_ol_mask =
PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM |
PKT_TX_UDP_CKSUM | PKT_TX_TUNNEL_GRE |
PKT_TX_TUNNEL_VXLAN | PKT_TX_OUTER_IP_CKSUM;
+ rte_be32_t p0_metadata, pn_metadata;
if (!pkts_n)
return 0;
- /* Count the number of packets having same ol_flags. */
- for (pos = 1; pos < pkts_n; ++pos)
- if ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & ol_mask)
+ p0_metadata = pkts[0]->ol_flags & PKT_TX_METADATA ?
+ pkts[0]->tx_metadata : 0;
+ /* Count the number of packets having same offload parameters. */
+ for (pos = 1; pos < pkts_n; ++pos) {
+ /* Check if packet has same checksum flags. */
+ if ((txq_offloads & MLX5_VEC_TX_CKSUM_OFFLOAD_CAP) &&
+ ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & cksum_ol_mask))
break;
+ /* Check if packet has same metadata. */
+ if (txq_offloads & DEV_TX_OFFLOAD_MATCH_METADATA) {
+ pn_metadata = pkts[pos]->ol_flags & PKT_TX_METADATA ?
+ pkts[pos]->tx_metadata : 0;
+ if (pn_metadata != p0_metadata)
+ break;
+ }
+ }
*cs_flags = txq_ol_cksum_to_cs(pkts[0]);
+ *metadata = p0_metadata;
return pos;
}
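Illustration of the grouping behaviour (txq_calc_offload() is internal to the PMD; "m0" and "m1" are assumed, already-built single-segment mbufs, not part of this patch):

	struct rte_mbuf *burst[2] = { m0, m1 };
	uint8_t cs;
	rte_be32_t meta;
	unsigned int n;

	m0->ol_flags |= PKT_TX_METADATA;
	m0->tx_metadata = RTE_BE32(0x1);
	m1->ol_flags |= PKT_TX_METADATA;
	m1->tx_metadata = RTE_BE32(0x2);
	n = txq_calc_offload(burst, 2, &cs, &meta,
			     DEV_TX_OFFLOAD_MATCH_METADATA);
	/* n == 1 and meta == RTE_BE32(0x1): the second packet starts a new group. */
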
uint16_t ret;
n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
- ret = txq_burst_v(txq, &pkts[nb_tx], n, 0);
+ ret = txq_burst_v(txq, &pkts[nb_tx], n, 0, 0);
nb_tx += ret;
if (!ret)
break;
uint8_t cs_flags = 0;
uint16_t n;
uint16_t ret;
+ rte_be32_t metadata = 0;
/* Transmit multi-seg packets in the head of pkts list. */
if ((txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
n = txq_count_contig_single_seg(&pkts[nb_tx], n);
- if (txq->offloads & MLX5_VEC_TX_CKSUM_OFFLOAD_CAP)
- n = txq_calc_offload(&pkts[nb_tx], n, &cs_flags);
- ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
+ if (txq->offloads & (MLX5_VEC_TX_CKSUM_OFFLOAD_CAP |
+ DEV_TX_OFFLOAD_MATCH_METADATA))
+ n = txq_calc_offload(&pkts[nb_tx], n,
+ &cs_flags, &metadata,
+ txq->offloads);
+ ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags, metadata);
nb_tx += ret;
if (!ret)
break;
/* HW offload capabilities of vectorized Tx. */
#define MLX5_VEC_TX_OFFLOAD_CAP \
(MLX5_VEC_TX_CKSUM_OFFLOAD_CAP | \
+ DEV_TX_OFFLOAD_MATCH_METADATA | \
DEV_TX_OFFLOAD_MULTI_SEGS)
/*
* Number of packets to be sent (<= MLX5_VPMD_TX_MAX_BURST).
* @param cs_flags
* Checksum offload flags to be written in the descriptor.
+ * @param metadata
+ * Metadata value to be written in the descriptor.
*
* @return
* Number of packets successfully transmitted (<= pkts_n).
*/
static inline uint16_t
txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
- uint8_t cs_flags)
+ uint8_t cs_flags, rte_be32_t metadata)
{
struct rte_mbuf **elts;
uint16_t elts_head = txq->elts_head;
ctrl = vqtbl1q_u8(ctrl, ctrl_shuf_m);
vst1q_u8((void *)t_wqe, ctrl);
/* Fill ESEG in the header. */
- vst1q_u8((void *)(t_wqe + 1),
- ((uint8x16_t) { 0, 0, 0, 0,
- cs_flags, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0 }));
+ vst1q_u32((void *)(t_wqe + 1),
+ ((uint32x4_t) { 0, cs_flags, metadata, 0 }));
#ifdef MLX5_PMD_SOFT_COUNTERS
txq->stats.opackets += pkts_n;
#endif
* Number of packets to be sent (<= MLX5_VPMD_TX_MAX_BURST).
* @param cs_flags
* Checksum offload flags to be written in the descriptor.
+ * @param metadata
+ * Metadata value to be written in the descriptor.
*
* @return
* Number of packets successfully transmitted (<= pkts_n).
*/
static inline uint16_t
txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
- uint8_t cs_flags)
+ uint8_t cs_flags, rte_be32_t metadata)
{
struct rte_mbuf **elts;
uint16_t elts_head = txq->elts_head;
ctrl = _mm_shuffle_epi8(ctrl, shuf_mask_ctrl);
_mm_store_si128(t_wqe, ctrl);
/* Fill ESEG in the header. */
- _mm_store_si128(t_wqe + 1,
- _mm_set_epi8(0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, cs_flags,
- 0, 0, 0, 0));
+ _mm_store_si128(t_wqe + 1, _mm_set_epi32(0, metadata, cs_flags, 0));
#ifdef MLX5_PMD_SOFT_COUNTERS
txq->stats.opackets += pkts_n;
#endif
offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
DEV_TX_OFFLOAD_UDP_TNL_TSO);
}
-
if (config->tunnel_en) {
if (config->hw_csum)
offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO);
}
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ if (config->dv_flow_en)
+ offloads |= DEV_TX_OFFLOAD_MATCH_METADATA;
+#endif
return offloads;
}
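To use the new capability, the application enables the offload when configuring the port; a minimal sketch, where port_id, nb_rxq and nb_txq are assumptions for the example only:

	/* Illustrative only: request Tx metadata matching if the PMD reports it. */
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf = { 0 };

	rte_eth_dev_info_get(port_id, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MATCH_METADATA)
		conf.txmode.offloads |= DEV_TX_OFFLOAD_MATCH_METADATA;
	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);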