diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 7893b3edd4..5fed42324d 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -21,6 +21,7 @@
 
 #include <mlx5_glue.h>
 #include <mlx5_malloc.h>
+#include <mlx5_common_mr.h>
 
 #include "mlx5_defs.h"
 #include "mlx5.h"
@@ -180,7 +181,7 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 		mbuf_init->nb_segs = 1;
 		mbuf_init->port = rxq->port_id;
 		if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
-			mbuf_init->ol_flags = EXT_ATTACHED_MBUF;
+			mbuf_init->ol_flags = RTE_MBUF_F_EXTERNAL;
 		/*
 		 * prevent compiler reordering:
 		 * rearm_data covers previous fields.
@@ -333,23 +334,22 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_config *config = &priv->config;
-	uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
-			     DEV_RX_OFFLOAD_TIMESTAMP |
-			     DEV_RX_OFFLOAD_JUMBO_FRAME |
-			     DEV_RX_OFFLOAD_RSS_HASH);
+	uint64_t offloads = (RTE_ETH_RX_OFFLOAD_SCATTER |
+			     RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+			     RTE_ETH_RX_OFFLOAD_RSS_HASH);
 
 	if (!config->mprq.enabled)
 		offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 	if (config->hw_fcs_strip)
-		offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+		offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	if (config->hw_csum)
-		offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
-			     DEV_RX_OFFLOAD_UDP_CKSUM |
-			     DEV_RX_OFFLOAD_TCP_CKSUM);
+		offloads |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+			     RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+			     RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
 	if (config->hw_vlan_strip)
-		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	if (MLX5_LRO_SUPPORTED(dev))
-		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+		offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 	return offloads;
 }
 
@@ -363,7 +363,7 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 uint64_t
 mlx5_get_rx_port_offloads(void)
 {
-	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+	uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 
 	return offloads;
 }
@@ -695,7 +695,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			   dev->data->dev_conf.rxmode.offloads;
 
 		/* The offloads should be checked on rte_eth_dev layer. */
-		MLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER);
+		MLX5_ASSERT(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
 		if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
 			DRV_LOG(ERR, "port %u queue index %u split "
 				     "offload not configured",
@@ -794,25 +794,22 @@ mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
 /**
  * DPDK callback to release a RX queue.
  *
- * @param dpdk_rxq
- *   Generic RX queue pointer.
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param qid
+ *   Receive queue index.
*/ void -mlx5_rx_queue_release(void *dpdk_rxq) +mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) { - struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq; - struct mlx5_rxq_ctrl *rxq_ctrl; - struct mlx5_priv *priv; + struct mlx5_rxq_data *rxq = dev->data->rx_queues[qid]; if (rxq == NULL) return; - rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); - priv = rxq_ctrl->priv; - if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx)) + if (!mlx5_rxq_releasable(dev, qid)) rte_panic("port %u Rx queue %u is still used by a flow and" - " cannot be removed\n", - PORT_ID(priv), rxq->idx); - mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx); + " cannot be removed\n", dev->data->port_id, qid); + mlx5_rxq_release(dev, qid); } /** @@ -834,9 +831,6 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev) unsigned int count = 0; struct rte_intr_handle *intr_handle = dev->intr_handle; - /* Representor shares dev->intr_handle with PF. */ - if (priv->representor) - return 0; if (!dev->data->dev_conf.intr_conf.rxq) return 0; mlx5_rx_intr_vec_disable(dev); @@ -917,9 +911,6 @@ mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev) unsigned int rxqs_n = priv->rxqs_n; unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID); - /* Representor shares dev->intr_handle with PF. */ - if (priv->representor) - return; if (!dev->data->dev_conf.intr_conf.rxq) return; if (!intr_handle->intr_vec) @@ -1171,6 +1162,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev) unsigned int strd_sz_n = 0; unsigned int i; unsigned int n_ibv = 0; + int ret; if (!mlx5_mprq_enabled(dev)) return 0; @@ -1240,7 +1232,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev) snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id); mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ, 0, NULL, NULL, mlx5_mprq_buf_init, - (void *)(uintptr_t)(1 << strd_num_n), + (void *)((uintptr_t)1 << strd_num_n), dev->device->numa_node, 0); if (mp == NULL) { DRV_LOG(ERR, @@ -1250,6 +1242,16 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev) rte_errno = ENOMEM; return -rte_errno; } + ret = mlx5_mr_mempool_register(&priv->sh->cdev->mr_scache, + priv->sh->cdev->pd, mp, &priv->mp_id); + if (ret < 0 && rte_errno != EEXIST) { + ret = rte_errno; + DRV_LOG(ERR, "port %u failed to register a mempool for Multi-Packet RQ", + dev->data->port_id); + rte_mempool_free(mp); + rte_errno = ret; + return -rte_errno; + } priv->mprq_mp = mp; exit: /* Set mempool for each Rx queue. */ @@ -1335,11 +1337,12 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, struct mlx5_dev_config *config = &priv->config; uint64_t offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads; - unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO); - unsigned int max_rx_pkt_len = lro_on_queue ? + unsigned int lro_on_queue = !!(offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO); + unsigned int max_rx_pktlen = lro_on_queue ? dev->data->dev_conf.rxmode.max_lro_pkt_size : - dev->data->dev_conf.rxmode.max_rx_pkt_len; - unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len + + dev->data->mtu + (unsigned int)RTE_ETHER_HDR_LEN + + RTE_ETHER_CRC_LEN; + unsigned int non_scatter_min_mbuf_size = max_rx_pktlen + RTE_PKTMBUF_HEADROOM; unsigned int max_lro_size = 0; unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM; @@ -1378,7 +1381,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, * needed to handle max size packets, replace zero length * with the buffer length from the pool. 
*/ - tail_len = max_rx_pkt_len; + tail_len = max_rx_pktlen; do { struct mlx5_eth_rxseg *hw_seg = &tmpl->rxq.rxseg[tmpl->rxq.rxseg_n]; @@ -1416,7 +1419,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, "port %u too many SGEs (%u) needed to handle" " requested maximum packet size %u, the maximum" " supported are %u", dev->data->port_id, - tmpl->rxq.rxseg_n, max_rx_pkt_len, + tmpl->rxq.rxseg_n, max_rx_pktlen, MLX5_MAX_RXQ_NSEG); rte_errno = ENOTSUP; goto error; @@ -1437,18 +1440,18 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, } while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n)); MLX5_ASSERT(tmpl->rxq.rxseg_n && tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG); - if (tmpl->rxq.rxseg_n > 1 && !(offloads & DEV_RX_OFFLOAD_SCATTER)) { + if (tmpl->rxq.rxseg_n > 1 && !(offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) { DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not" " configured and no enough mbuf space(%u) to contain " "the maximum RX packet length(%u) with head-room(%u)", - dev->data->port_id, idx, mb_len, max_rx_pkt_len, + dev->data->port_id, idx, mb_len, max_rx_pktlen, RTE_PKTMBUF_HEADROOM); rte_errno = ENOSPC; goto error; } tmpl->type = MLX5_RXQ_TYPE_STANDARD; - if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh, - MLX5_MR_BTREE_CACHE_N, socket)) { + if (mlx5_mr_ctrl_init(&tmpl->rxq.mr_ctrl, + &priv->sh->cdev->mr_scache.dev_gen, socket)) { /* rte_errno is already set. */ goto error; } @@ -1460,7 +1463,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, * following conditions are met: * - MPRQ is enabled. * - The number of descs is more than the number of strides. - * - max_rx_pkt_len plus overhead is less than the max size + * - max_rx_pktlen plus overhead is less than the max size * of a stride or mprq_stride_size is specified by a user. * Need to make sure that there are enough strides to encap * the maximum packet size in case mprq_stride_size is set. 
@@ -1481,10 +1484,10 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, config->mprq.stride_size_n : mprq_stride_size; tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT; tmpl->rxq.strd_scatter_en = - !!(offloads & DEV_RX_OFFLOAD_SCATTER); + !!(offloads & RTE_ETH_RX_OFFLOAD_SCATTER); tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size, config->mprq.max_memcpy_len); - max_lro_size = RTE_MIN(max_rx_pkt_len, + max_lro_size = RTE_MIN(max_rx_pktlen, (1u << tmpl->rxq.strd_num_n) * (1u << tmpl->rxq.strd_sz_n)); DRV_LOG(DEBUG, @@ -1493,10 +1496,10 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, dev->data->port_id, idx, tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n); } else if (tmpl->rxq.rxseg_n == 1) { - MLX5_ASSERT(max_rx_pkt_len <= first_mb_free_size); + MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size); tmpl->rxq.sges_n = 0; - max_lro_size = max_rx_pkt_len; - } else if (offloads & DEV_RX_OFFLOAD_SCATTER) { + max_lro_size = max_rx_pktlen; + } else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) { unsigned int sges_n; if (lro_on_queue && first_mb_free_size < @@ -1517,13 +1520,13 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, "port %u too many SGEs (%u) needed to handle" " requested maximum packet size %u, the maximum" " supported are %u", dev->data->port_id, - 1 << sges_n, max_rx_pkt_len, + 1 << sges_n, max_rx_pktlen, 1u << MLX5_MAX_LOG_RQ_SEGS); rte_errno = ENOTSUP; goto error; } tmpl->rxq.sges_n = sges_n; - max_lro_size = max_rx_pkt_len; + max_lro_size = max_rx_pktlen; } if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq)) DRV_LOG(WARNING, @@ -1557,9 +1560,9 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, } mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size); /* Toggle RX checksum offload if hardware supports it. */ - tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM); + tmpl->rxq.csum = !!(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM); /* Configure Rx timestamp. */ - tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP); + tmpl->rxq.hw_timestamp = !!(offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP); tmpl->rxq.timestamp_rx_flag = 0; if (tmpl->rxq.hw_timestamp && rte_mbuf_dyn_rx_timestamp_register( &tmpl->rxq.timestamp_offset, @@ -1568,11 +1571,11 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, goto error; } /* Configure VLAN stripping. */ - tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP); + tmpl->rxq.vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP); /* By default, FCS (CRC) is stripped by hardware. */ tmpl->rxq.crc_present = 0; tmpl->rxq.lro = lro_on_queue; - if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) { + if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) { if (config->hw_fcs_strip) { /* * RQs used for LRO-enabled TIRs should not be @@ -1602,7 +1605,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, tmpl->rxq.crc_present << 2); /* Save port ID. 
*/ tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf && - (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS)); + (!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS)); tmpl->rxq.port_id = dev->data->port_id; tmpl->priv = priv; tmpl->rxq.mp = rx_seg[0].mp; @@ -1712,7 +1715,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl; - if (!(*priv->rxqs)[idx]) + if (priv->rxqs == NULL || (*priv->rxqs)[idx] == NULL) return 0; rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); if (__atomic_sub_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1) @@ -2086,7 +2089,7 @@ mlx5_ind_table_obj_modify(struct rte_eth_dev *dev, error: err = rte_errno; for (j = 0; j < i; j++) - mlx5_rxq_release(dev, ind_tbl->queues[j]); + mlx5_rxq_release(dev, queues[j]); rte_errno = err; DRV_LOG(DEBUG, "Port %u cannot setup indirection table.", dev->data->port_id); @@ -2094,23 +2097,19 @@ error: } int -mlx5_hrxq_match_cb(void *tool_ctx, struct mlx5_list_entry *entry, void *cb_ctx) +mlx5_hrxq_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry, + void *cb_ctx) { - struct rte_eth_dev *dev = tool_ctx; struct mlx5_flow_cb_ctx *ctx = cb_ctx; struct mlx5_flow_rss_desc *rss_desc = ctx->data; struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry); - struct mlx5_ind_table_obj *ind_tbl; - if (hrxq->rss_key_len != rss_desc->key_len || + return (hrxq->rss_key_len != rss_desc->key_len || memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) || - hrxq->hash_fields != rss_desc->hash_fields) - return 1; - ind_tbl = mlx5_ind_table_obj_get(dev, rss_desc->queue, - rss_desc->queue_num); - if (ind_tbl) - mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone); - return ind_tbl != hrxq->ind_table; + hrxq->hash_fields != rss_desc->hash_fields || + hrxq->ind_table->queues_n != rss_desc->queue_num || + memcmp(hrxq->ind_table->queues, rss_desc->queue, + rss_desc->queue_num * sizeof(rss_desc->queue[0]))); } /**
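
The changes above track the DPDK 21.11 API rework: the DEV_RX_OFFLOAD_* flags became RTE_ETH_RX_OFFLOAD_*, the Rx queue release callback now takes a (device, queue index) pair instead of an opaque queue pointer, and rxmode.max_rx_pkt_len is gone, so the driver derives the maximum frame length from the configured MTU (dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN). The sketch below shows how an application might size a single-segment Rx mempool under the MTU-based scheme; the helper name and pool parameters are illustrative only, not part of the mlx5 driver or this patch.

/*
 * Minimal sketch, assuming DPDK >= 21.11. Mirrors the driver's
 * computation above: frame length = MTU + Ethernet header + CRC,
 * plus mbuf headroom, so a whole frame fits in one mbuf segment
 * when RTE_ETH_RX_OFFLOAD_SCATTER is not enabled.
 */
#include <stdio.h>
#include <stdint.h>

#include <rte_ether.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

static struct rte_mempool *
rx_pool_for_mtu(uint16_t port_id, uint16_t mtu, unsigned int n_mbufs,
		int socket_id)
{
	/* Same formula the driver uses for max_rx_pktlen above. */
	uint32_t frame_len = (uint32_t)mtu + RTE_ETHER_HDR_LEN +
			     RTE_ETHER_CRC_LEN;
	uint16_t data_room = (uint16_t)(RTE_PKTMBUF_HEADROOM + frame_len);
	char name[RTE_MEMPOOL_NAMESIZE];

	snprintf(name, sizeof(name), "rx-pool-p%u", port_id);
	/* 256-entry per-lcore cache, no private area. */
	return rte_pktmbuf_pool_create(name, n_mbufs, 256, 0, data_room,
				       socket_id);
}

If the pool's data room cannot cover the whole frame, RTE_ETH_RX_OFFLOAD_SCATTER must be enabled so the PMD can spread a packet across several segments, which is exactly the sges_n sizing logic in mlx5_rxq_new() above.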