info->max_rx_queues = max;
info->max_tx_queues = max;
info->max_mac_addrs = RTE_DIM(priv->mac);
- info->rx_offload_capa = 0;
info->tx_offload_capa = mlx4_get_tx_port_offloads(priv);
- if (priv->hw_csum) {
- info->rx_offload_capa |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM);
- }
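+ /* Per-port Rx offload capabilities include those supported per queue. */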
+ info->rx_queue_offload_capa = mlx4_get_rx_queue_offloads(priv);
+ info->rx_offload_capa = (mlx4_get_rx_port_offloads(priv) |
+ info->rx_queue_offload_capa);
if (mlx4_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
* - MAC flow rules are generated from @p dev->data->mac_addrs
* (@p priv->mac array).
* - An additional flow rule for Ethernet broadcasts is also generated.
- * - All these are per-VLAN if @p dev->data->dev_conf.rxmode.hw_vlan_filter
+ * - All these are per-VLAN if @p DEV_RX_OFFLOAD_VLAN_FILTER
* is enabled and VLAN filters are configured.
*
* @param priv
};
struct ether_addr *rule_mac = ð_spec.dst;
rte_be16_t *rule_vlan =
- priv->dev->data->dev_conf.rxmode.hw_vlan_filter &&
+ (priv->dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER) &&
!priv->dev->data->promiscuous ?
&vlan_spec.tci :
NULL;
}
}
+/**
+ * Returns the per-queue supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Rx offloads.
+ */
+uint64_t
+mlx4_get_rx_queue_offloads(struct priv *priv)
+{
+ uint64_t offloads = DEV_RX_OFFLOAD_SCATTER;
+
+ if (priv->hw_csum)
+ offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+ return offloads;
+}
+
+/**
+ * Returns the per-port supported offloads.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Supported Rx offloads.
+ */
+uint64_t
+mlx4_get_rx_port_offloads(struct priv *priv)
+{
+ uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+
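+ /*
+  * VLAN filtering can only be enabled for the whole port, hence it is
+  * reported here rather than per queue. priv is unused for the time being.
+  */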
+ (void)priv;
+ return offloads;
+}
+
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param requested
+ * Per-queue offloads configuration.
+ *
+ * @return
+ * Nonzero when configuration is valid.
+ */
+static int
+mlx4_check_rx_queue_offloads(struct priv *priv, uint64_t requested)
+{
+ uint64_t mandatory = priv->dev->data->dev_conf.rxmode.offloads;
+ uint64_t supported = mlx4_get_rx_port_offloads(priv);
+
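+ /*
+  * Offloads that can only be set at port level ("supported" here) must
+  * match the port configuration exactly; purely per-queue offloads are
+  * not constrained by this check.
+  */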
+ return !((mandatory ^ requested) & supported);
+}
+
/**
* DPDK callback to configure a Rx queue.
*
- (void)conf; /* Thresholds configuration (ignored). */
DEBUG("%p: configuring queue %u for %u descriptors",
(void *)dev, idx, desc);
+ if (!mlx4_check_rx_queue_offloads(priv, conf->offloads)) {
+ rte_errno = ENOTSUP;
+ ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port "
+ "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
+ (void *)dev, conf->offloads,
+ dev->data->dev_conf.rxmode.offloads,
+ (mlx4_get_rx_port_offloads(priv) |
+ mlx4_get_rx_queue_offloads(priv)));
+ return -rte_errno;
+ }
if (idx >= dev->data->nb_rx_queues) {
rte_errno = EOVERFLOW;
ERROR("%p: queue index out of range (%u >= %u)",
.elts_n = rte_log2_u32(desc),
.elts = elts,
/* Toggle Rx checksum offload if hardware supports it. */
- .csum = (priv->hw_csum &&
- dev->data->dev_conf.rxmode.hw_ip_checksum),
- .csum_l2tun = (priv->hw_csum_l2tun &&
- dev->data->dev_conf.rxmode.hw_ip_checksum),
+ .csum = priv->hw_csum &&
+ (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
+ .csum_l2tun = priv->hw_csum_l2tun &&
+ (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
.l2tun_offload = priv->hw_csum_l2tun,
.stats = {
.idx = idx,
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
(mb_len - RTE_PKTMBUF_HEADROOM)) {
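+ /* Maximum packet size fits in a single mbuf, nothing to do. */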
;
- } else if (dev->data->dev_conf.rxmode.enable_scatter) {
+ } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
uint32_t size =
RTE_PKTMBUF_HEADROOM +
dev->data->dev_conf.rxmode.max_rx_pkt_len;