net/mlx5: fix flow mark action handling
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 40f2c47..b0551cc 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -344,6 +344,82 @@ mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
        *dst = *src;
 }
 
+/**
+ * DPDK callback to check the status of a tx descriptor.
+ *
+ * @param tx_queue
+ *   The tx queue.
+ * @param[in] offset
+ *   The index of the descriptor in the ring.
+ *
+ * @return
+ *   The status of the tx descriptor.
+ */
+int
+mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+       struct txq *txq = tx_queue;
+       const unsigned int elts_n = 1 << txq->elts_n;
+       const unsigned int elts_cnt = elts_n - 1;
+       unsigned int used;
+
+       txq_complete(txq);
+       used = (txq->elts_head - txq->elts_tail) & elts_cnt;
+       if (offset < used)
+               return RTE_ETH_TX_DESC_FULL;
+       return RTE_ETH_TX_DESC_DONE;
+}
+
+/**
+ * DPDK callback to check the status of a rx descriptor.
+ *
+ * @param rx_queue
+ *   The rx queue.
+ * @param[in] offset
+ *   The index of the descriptor in the ring.
+ *
+ * @return
+ *   The status of the rx descriptor.
+ */
+int
+mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+       struct rxq *rxq = rx_queue;
+       struct rxq_zip *zip = &rxq->zip;
+       volatile struct mlx5_cqe *cqe;
+       const unsigned int cqe_n = (1 << rxq->cqe_n);
+       const unsigned int cqe_cnt = cqe_n - 1;
+       unsigned int cq_ci;
+       unsigned int used;
+
+       /* If we are processing a compressed CQE. */
+       if (zip->ai) {
+               used = zip->cqe_cnt - zip->ca;
+               cq_ci = zip->cq_ci;
+       } else {
+               used = 0;
+               cq_ci = rxq->cq_ci;
+       }
+       cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
+       while (check_cqe(cqe, cqe_n, cq_ci) == 0) {
+               int8_t op_own;
+               unsigned int n;
+
+               op_own = cqe->op_own;
+               if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
+                       n = ntohl(cqe->byte_cnt);
+               else
+                       n = 1;
+               cq_ci += n;
+               used += n;
+               cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
+       }
+       used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
+       if (offset < used)
+               return RTE_ETH_RX_DESC_DONE;
+       return RTE_ETH_RX_DESC_AVAIL;
+}
+
 /**
  * DPDK callback for TX.
  *
@@ -1106,6 +1182,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
                                        mpw.data.raw += length;
                        }
                        ++mpw.pkts_n;
+                       mpw.total_len += length;
                        ++j;
                        if (mpw.pkts_n == MLX5_MPW_DSEG_MAX) {
                                mlx5_mpw_inline_close(txq, &mpw);
@@ -1115,7 +1192,6 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
                                inline_room -= length;
                        }
                }
-               mpw.total_len += length;
                elts_head = elts_head_next;
 #ifdef MLX5_PMD_SOFT_COUNTERS
                /* Increment sent bytes counter. */
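The two hunks above move the length accounting inside the per-segment loop,
so every segment of a chained mbuf contributes to mpw.total_len instead of
only the last one. A minimal sketch of the intended per-segment accumulation,
with a hypothetical helper name:

#include <rte_mbuf.h>

/* Illustration only: the inline MPW session length must grow by data_len of
 * each segment as it is copied, which is what moving
 * 'mpw.total_len += length;' into the segment loop achieves. */
static inline uint32_t
chained_pkt_total_len(struct rte_mbuf *pkt)
{
        uint32_t total = 0;
        struct rte_mbuf *seg;

        for (seg = pkt; seg != NULL; seg = seg->next)
                total += rte_pktmbuf_data_len(seg); /* accumulate per segment */
        return total;
}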
@@ -1431,7 +1507,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        }
                        if (rxq->mark &&
                            ((cqe->sop_drop_qpn !=
-                             htonl(MLX5_FLOW_MARK_INVALID)) ||
+                             htonl(MLX5_FLOW_MARK_INVALID)) &&
                             (cqe->sop_drop_qpn !=
                              htonl(MLX5_FLOW_MARK_DEFAULT)))) {
                                pkt->hash.fdir.hi =
@@ -1447,7 +1523,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                        pkt->ol_flags |=
                                                rxq_cq_to_ol_flags(rxq, cqe);
                                }
-                               if (cqe->hdr_type_etc &
+                               if (ntohs(cqe->hdr_type_etc) &
                                    MLX5_CQE_VLAN_STRIPPED) {
                                        pkt->ol_flags |= PKT_RX_VLAN_PKT |
                                                PKT_RX_VLAN_STRIPPED;
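cqe->hdr_type_etc is written by the NIC in network (big-endian) byte order,
while MLX5_CQE_VLAN_STRIPPED is a CPU-order constant, so without the ntohs()
conversion the test examines the wrong bit on little-endian hosts and misses
stripped VLANs. A sketch making the conversion explicit, assuming the
driver's struct mlx5_cqe layout; the helper name is hypothetical:

/* True when the NIC reports that it stripped the VLAN tag for this CQE. */
static inline int
mlx5_cqe_vlan_stripped(volatile struct mlx5_cqe *cqe)
{
        return !!(ntohs(cqe->hdr_type_etc) & MLX5_CQE_VLAN_STRIPPED);
}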