X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rx.c;h=11ea935d72f0c3360466a0a096221bd9f1ed81c0;hb=fe3620aabacd504a2b6c807bd0abe18cc1a2da12;hp=6cd71a44ebe6172c78718b6c94594be648b934d1;hpb=a8f0df6bf98d295668b429f65884af887c2c5b77;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_rx.c b/drivers/net/mlx5/mlx5_rx.c
index 6cd71a44eb..11ea935d72 100644
--- a/drivers/net/mlx5/mlx5_rx.c
+++ b/drivers/net/mlx5/mlx5_rx.c
@@ -18,11 +18,11 @@
 #include <mlx5_prm.h>
 #include <mlx5_common.h>
+#include <mlx5_common_mr.h>
 
 #include "mlx5_autoconf.h"
 #include "mlx5_defs.h"
 #include "mlx5.h"
-#include "mlx5_mr.h"
 #include "mlx5_utils.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_rx.h"
@@ -73,7 +73,7 @@ rx_queue_count(struct mlx5_rxq_data *rxq)
 	const unsigned int cqe_n = (1 << rxq->cqe_n);
 	const unsigned int sges_n = (1 << rxq->sges_n);
 	const unsigned int elts_n = (1 << rxq->elts_n);
-	const unsigned int strd_n = (1 << rxq->strd_num_n);
+	const unsigned int strd_n = RTE_BIT32(rxq->log_strd_num);
 	const unsigned int cqe_cnt = cqe_n - 1;
 	unsigned int cq_ci, used;
 
@@ -118,15 +118,7 @@ int
 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
 {
 	struct mlx5_rxq_data *rxq = rx_queue;
-	struct mlx5_rxq_ctrl *rxq_ctrl =
-			container_of(rxq, struct mlx5_rxq_ctrl, rxq);
-	struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
 
-	if (dev->rx_pkt_burst == NULL ||
-	    dev->rx_pkt_burst == removed_rx_burst) {
-		rte_errno = ENOTSUP;
-		return -rte_errno;
-	}
 	if (offset >= (1 << rxq->cqe_n)) {
 		rte_errno = EINVAL;
 		return -rte_errno;
@@ -156,10 +148,8 @@ void
 mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 		  struct rte_eth_rxq_info *qinfo)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
-	struct mlx5_rxq_ctrl *rxq_ctrl =
-		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+	struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, rx_queue_id);
+	struct mlx5_rxq_data *rxq = mlx5_rxq_data_get(dev, rx_queue_id);
 
 	if (!rxq)
 		return;
@@ -170,12 +160,15 @@ mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 	qinfo->conf.rx_thresh.wthresh = 0;
 	qinfo->conf.rx_free_thresh = rxq->rq_repl_thresh;
 	qinfo->conf.rx_drop_en = 1;
-	qinfo->conf.rx_deferred_start = rxq_ctrl ? 0 : 1;
+	if (rxq_ctrl == NULL || rxq_ctrl->obj == NULL)
+		qinfo->conf.rx_deferred_start = 0;
+	else
+		qinfo->conf.rx_deferred_start = 1;
 	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
 	qinfo->scattered_rx = dev->data->scattered_rx;
 	qinfo->nb_desc = mlx5_rxq_mprq_enabled(rxq) ?
-		(1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
-		(1 << rxq->elts_n);
+		RTE_BIT32(rxq->elts_n) * RTE_BIT32(rxq->log_strd_num) :
+		RTE_BIT32(rxq->elts_n);
 }
 
 /**
@@ -185,7 +178,7 @@ mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
  *   Pointer to the device structure.
  *
  * @param rx_queue_id
- *   Rx queue identificatior.
+ *   Rx queue identification.
  *
  * @param mode
  *   Pointer to the burts mode information.
@@ -199,10 +192,8 @@ mlx5_rx_burst_mode_get(struct rte_eth_dev *dev,
 		       struct rte_eth_burst_mode *mode)
 {
 	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_data *rxq;
+	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
 
-	rxq = (*priv->rxqs)[rx_queue_id];
 	if (!rxq) {
 		rte_errno = EINVAL;
 		return -rte_errno;
@@ -240,35 +231,47 @@ mlx5_rx_burst_mode_get(struct rte_eth_dev *dev,
 /**
  * DPDK callback to get the number of used descriptors in a RX queue.
  *
- * @param dev
- *   Pointer to the device structure.
- *
- * @param rx_queue_id
- *   The Rx queue.
+ * @param rx_queue
+ *   The Rx queue pointer.
  *
  * @return
  *   The number of used rx descriptor.
  *   -EINVAL if the queue is invalid
  */
 uint32_t
-mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+mlx5_rx_queue_count(void *rx_queue)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_data *rxq;
+	struct mlx5_rxq_data *rxq = rx_queue;
+	struct rte_eth_dev *dev;
 
-	if (dev->rx_pkt_burst == NULL ||
-	    dev->rx_pkt_burst == removed_rx_burst) {
-		rte_errno = ENOTSUP;
-		return -rte_errno;
-	}
-	rxq = (*priv->rxqs)[rx_queue_id];
 	if (!rxq) {
 		rte_errno = EINVAL;
 		return -rte_errno;
 	}
+
+	dev = &rte_eth_devices[rxq->port_id];
+
+	if (dev->rx_pkt_burst == NULL ||
+	    dev->rx_pkt_burst == rte_eth_pkt_burst_dummy) {
+		rte_errno = ENOTSUP;
+		return -rte_errno;
+	}
+
 	return rx_queue_count(rxq);
 }
 
+#define CLB_VAL_IDX 0
+#define CLB_MSK_IDX 1
+static int
+mlx5_monitor_callback(const uint64_t value,
+		const uint64_t opaque[RTE_POWER_MONITOR_OPAQUE_SZ])
+{
+	const uint64_t m = opaque[CLB_MSK_IDX];
+	const uint64_t v = opaque[CLB_VAL_IDX];
+
+	return (value & m) == v ? -1 : 0;
+}
+
 int mlx5_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
 {
 	struct mlx5_rxq_data *rxq = rx_queue;
@@ -282,8 +285,9 @@ int mlx5_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
 		return -rte_errno;
 	}
 	pmc->addr = &cqe->op_own;
-	pmc->val = !!idx;
-	pmc->mask = MLX5_CQE_OWNER_MASK;
+	pmc->opaque[CLB_VAL_IDX] = !!idx;
+	pmc->opaque[CLB_MSK_IDX] = MLX5_CQE_OWNER_MASK;
+	pmc->fn = mlx5_monitor_callback;
 	pmc->size = sizeof(uint8_t);
 	return 0;
 }
@@ -343,16 +347,18 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
 		volatile struct mlx5_wqe_data_seg *scat;
 		uintptr_t addr;
 		uint32_t byte_count;
+		uint32_t lkey;
 
 		if (mlx5_rxq_mprq_enabled(rxq)) {
 			struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
 
 			scat = &((volatile struct mlx5_wqe_mprq *)
 				rxq->wqes)[i].dseg;
-			addr = (uintptr_t)mlx5_mprq_buf_addr(buf,
-							 1 << rxq->strd_num_n);
-			byte_count = (1 << rxq->strd_sz_n) *
-					(1 << rxq->strd_num_n);
+			addr = (uintptr_t)mlx5_mprq_buf_addr
+					(buf, RTE_BIT32(rxq->log_strd_num));
+			byte_count = RTE_BIT32(rxq->log_strd_sz) *
+				     RTE_BIT32(rxq->log_strd_num);
+			lkey = mlx5_rx_addr2mr(rxq, addr);
 		} else {
 			struct rte_mbuf *buf = (*rxq->elts)[i];
 
@@ -360,13 +366,14 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
 				rxq->wqes)[i];
 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
 			byte_count = DATA_LEN(buf);
+			lkey = mlx5_rx_mb2mr(rxq, buf);
 		}
 		/* scat->addr must be able to store a pointer. */
 		MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t));
 		*scat = (struct mlx5_wqe_data_seg){
 			.addr = rte_cpu_to_be_64(addr),
 			.byte_count = rte_cpu_to_be_32(byte_count),
-			.lkey = mlx5_rx_addr2mr(rxq, addr),
+			.lkey = lkey,
 		};
 	}
 	rxq->consumed_strd = 0;
@@ -376,7 +383,7 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
 		.ai = 0,
 	};
 	rxq->elts_ci = mlx5_rxq_mprq_enabled(rxq) ?
-		(wqe_n >> rxq->sges_n) * (1 << rxq->strd_num_n) : 0;
+		(wqe_n >> rxq->sges_n) * RTE_BIT32(rxq->log_strd_num) : 0;
 	/* Update doorbell counter. */
 	rxq->rq_ci = wqe_n >> rxq->sges_n;
 	rte_io_wmb();
@@ -405,7 +412,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
 	const uint16_t cqe_n = 1 << rxq->cqe_n;
 	const uint16_t cqe_mask = cqe_n - 1;
 	const uint16_t wqe_n = 1 << rxq->elts_n;
-	const uint16_t strd_n = 1 << rxq->strd_num_n;
+	const uint16_t strd_n = RTE_BIT32(rxq->log_strd_num);
 	struct mlx5_rxq_ctrl *rxq_ctrl =
 			container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 	union {
@@ -425,10 +432,10 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
 		sm.is_wq = 1;
 		sm.queue_id = rxq->idx;
 		sm.state = IBV_WQS_RESET;
-		if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
+		if (mlx5_queue_state_modify(RXQ_DEV(rxq_ctrl), &sm))
 			return -1;
 		if (rxq_ctrl->dump_file_n <
-		    rxq_ctrl->priv->config.max_dump_files_num) {
+		    RXQ_PORT(rxq_ctrl)->config.max_dump_files_num) {
 			MKSTR(err_str, "Unexpected CQE error syndrome "
 			      "0x%02x CQN = %u RQN = %u wqe_counter = %u"
 			      " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
@@ -465,8 +472,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
 			sm.is_wq = 1;
 			sm.queue_id = rxq->idx;
 			sm.state = IBV_WQS_RDY;
-			if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
-						    &sm))
+			if (mlx5_queue_state_modify(RXQ_DEV(rxq_ctrl), &sm))
 				return -1;
 			if (vec) {
 				const uint32_t elts_n =
@@ -679,10 +685,10 @@ rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
 	ol_flags =
 		TRANSPOSE(flags,
 			  MLX5_CQE_RX_L3_HDR_VALID,
-			  PKT_RX_IP_CKSUM_GOOD) |
+			  RTE_MBUF_F_RX_IP_CKSUM_GOOD) |
 		TRANSPOSE(flags,
 			  MLX5_CQE_RX_L4_HDR_VALID,
-			  PKT_RX_L4_CKSUM_GOOD);
+			  RTE_MBUF_F_RX_L4_CKSUM_GOOD);
 	return ol_flags;
 }
 
@@ -706,6 +712,7 @@ rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
 {
 	/* Update packet information. */
 	pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe, mcqe);
+	pkt->port = unlikely(rxq->shared) ? cqe->user_index_low : rxq->port_id;
 	if (rxq->rss_hash) {
 		uint32_t rss_hash_res = 0;
@@ -718,7 +725,7 @@ rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
 			rss_hash_res = rte_be_to_cpu_32(mcqe->rx_hash_result);
 		if (rss_hash_res) {
 			pkt->hash.rss = rss_hash_res;
-			pkt->ol_flags |= PKT_RX_RSS_HASH;
+			pkt->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
 		}
 	}
 	if (rxq->mark) {
@@ -732,16 +739,16 @@ rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
 			mark = ((mcqe->byte_cnt_flow & 0xff) << 8) |
 				(mcqe->flow_tag_high << 16);
 		if (MLX5_FLOW_MARK_IS_VALID(mark)) {
-			pkt->ol_flags |= PKT_RX_FDIR;
+			pkt->ol_flags |= RTE_MBUF_F_RX_FDIR;
 			if (mark != RTE_BE32(MLX5_FLOW_MARK_DEFAULT)) {
-				pkt->ol_flags |= PKT_RX_FDIR_ID;
+				pkt->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
 				pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
 			}
 		}
 	}
 	if (rxq->dynf_meta) {
-		uint32_t meta = cqe->flow_table_metadata &
-				rxq->flow_meta_port_mask;
+		uint32_t meta = rte_be_to_cpu_32(cqe->flow_table_metadata) &
+			rxq->flow_meta_port_mask;
 
 		if (meta) {
 			pkt->ol_flags |= rxq->flow_meta_mask;
@@ -762,7 +769,7 @@ rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
 			vlan_strip = mcqe->hdr_type &
 				     RTE_BE16(MLX5_CQE_VLAN_STRIPPED);
 		if (vlan_strip) {
-			pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+			pkt->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
 			pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
 		}
 	}
@@ -850,7 +857,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		}
 		pkt = seg;
 		MLX5_ASSERT(len >= (rxq->crc_present << 2));
-		pkt->ol_flags &= EXT_ATTACHED_MBUF;
+		pkt->ol_flags &= RTE_MBUF_F_EXTERNAL;
 		rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
 		if (rxq->crc_present)
 			len -= RTE_ETHER_CRC_LEN;
@@ -859,7 +866,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 				mlx5_lro_update_hdr
 					(rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
 					 mcqe, rxq, len);
-				pkt->ol_flags |= PKT_RX_LRO;
+				pkt->ol_flags |= RTE_MBUF_F_RX_LRO;
 				pkt->tso_segsz = len / cqe->lro_num_seg;
 			}
 		}
@@ -1015,20 +1022,6 @@ mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
 	mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum, l4_type);
 }
 
-void
-mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
-{
-	struct mlx5_mprq_buf *buf = opaque;
-
-	if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
-		rte_mempool_put(buf->mp, buf);
-	} else if (unlikely(__atomic_sub_fetch(&buf->refcnt, 1,
-					       __ATOMIC_RELAXED) == 0)) {
-		__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
-		rte_mempool_put(buf->mp, buf);
-	}
-}
-
 void
 mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
 {
@@ -1052,8 +1045,8 @@ uint16_t
 mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
 	struct mlx5_rxq_data *rxq = dpdk_rxq;
-	const uint32_t strd_n = 1 << rxq->strd_num_n;
-	const uint32_t strd_sz = 1 << rxq->strd_sz_n;
+	const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
+	const uint32_t strd_sz = RTE_BIT32(rxq->log_strd_sz);
 	const uint32_t cq_mask = (1 << rxq->cqe_n) - 1;
 	const uint32_t wq_mask = (1 << rxq->elts_n) - 1;
 	volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
@@ -1131,7 +1124,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		if (cqe->lro_num_seg > 1) {
 			mlx5_lro_update_hdr(rte_pktmbuf_mtod(pkt, uint8_t *),
 					    cqe, mcqe, rxq, len);
-			pkt->ol_flags |= PKT_RX_LRO;
+			pkt->ol_flags |= RTE_MBUF_F_RX_LRO;
 			pkt->tso_segsz = len / cqe->lro_num_seg;
 		}
 		PKT_LEN(pkt) = len;
@@ -1160,31 +1153,6 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	return i;
 }
 
-/**
- * Dummy DPDK callback for RX.
- *
- * This function is used to temporarily replace the real callback during
- * unsafe control operations on the queue, or in case of error.
- *
- * @param dpdk_rxq
- *   Generic pointer to RX queue structure.
- * @param[out] pkts
- *   Array to store received packets.
- * @param pkts_n
- *   Maximum number of packets in array.
- *
- * @return
- *   Number of packets successfully received (<= pkts_n).
- */
-uint16_t
-removed_rx_burst(void *dpdk_rxq __rte_unused,
-		 struct rte_mbuf **pkts __rte_unused,
-		 uint16_t pkts_n __rte_unused)
-{
-	rte_mb();
-	return 0;
-}
-
 /*
  * Vectorized Rx routines are not compiled in when required vector instructions
  * are not supported on a target architecture.
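
Note on the power-monitoring hunks: the fixed pmc->val/pmc->mask compare is
replaced by a driver-supplied callback (pmc->fn) plus an opaque array, so
rte_power_monitor() now asks the PMD whether the wake-up condition holds
instead of doing a raw masked compare. A minimal application-side sketch of
how this is consumed follows; rx_wait() is a hypothetical helper, not part of
this patch, and error handling is trimmed:

#include <rte_ethdev.h>
#include <rte_power_intrinsics.h>

/* Hypothetical helper: sleep until the Rx queue's next CQE arrives. The
 * mlx5 PMD fills pmc.addr with &cqe->op_own and registers
 * mlx5_monitor_callback(), which aborts the sleep (returns -1) once the
 * CQE ownership bits report a new completion, as in the hunk above. */
static int
rx_wait(uint16_t port_id, uint16_t queue_id)
{
	struct rte_power_monitor_cond pmc;
	int ret;

	ret = rte_eth_get_monitor_addr(port_id, queue_id, &pmc);
	if (ret != 0)
		return ret;
	/* Arm the monitor and wait; UINT64_MAX means no TSC deadline. */
	return rte_power_monitor(&pmc, UINT64_MAX);
}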
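
Note on mlx5_rx_queue_count(): dropping the (dev, rx_queue_id) pair follows
the ethdev move to per-queue fast-path ops; the driver now receives its queue
data pointer directly and recovers the port via rxq->port_id, as the body
above does. The application-facing API is unchanged. Illustrative snippet
only (log_rx_backlog() is not part of the patch):

#include <stdio.h>
#include <rte_ethdev.h>

/* Illustrative only: the public API keeps (port_id, queue_id) addressing;
 * ethdev resolves the queue data pointer and invokes the PMD callback. */
static void
log_rx_backlog(uint16_t port_id, uint16_t queue_id)
{
	int used = rte_eth_rx_queue_count(port_id, queue_id);

	if (used < 0) /* -ENOTSUP/-EINVAL propagated from the driver */
		printf("port %u rxq %u: count unavailable (%d)\n",
		       port_id, queue_id, used);
	else
		printf("port %u rxq %u: %d descriptors in use\n",
		       port_id, queue_id, used);
}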
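
Note on the strd_num_n/strd_sz_n renames: the new log_strd_num/log_strd_sz
fields still hold log2 values, and RTE_BIT32(nr) is defined in rte_bitops.h
as (UINT32_C(1) << (nr)), so each "1 << rxq->..." replaced above becomes an
explicitly unsigned 32-bit shift with the same value. Sketch of the
equivalence (strd_count() is illustrative, not from the patch):

#include <stdint.h>
#include <rte_bitops.h>

/* The substitution relied on throughout the diff:
 * RTE_BIT32(nr) == (UINT32_C(1) << (nr)). */
static inline uint32_t
strd_count(uint8_t log_strd_num)
{
	return RTE_BIT32(log_strd_num); /* same value as 1U << log_strd_num */
}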