net/i40e: fix incorrect packet index reference
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 9bfc7b8..d5e6453 100644
 
 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
 
+#ifdef RTE_LIBRTE_IEEE1588
+#define I40E_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#else
+#define I40E_TX_IEEE1588_TMST 0
+#endif
+
 #define I40E_TX_CKSUM_OFFLOAD_MASK (            \
                PKT_TX_IP_CKSUM |                \
                PKT_TX_L4_MASK |                 \
@@ -87,7 +93,9 @@
                PKT_TX_OUTER_IP_CKSUM | \
                PKT_TX_TCP_SEG |        \
                PKT_TX_QINQ_PKT |       \
-               PKT_TX_VLAN_PKT)
+               PKT_TX_VLAN_PKT |       \
+               PKT_TX_TUNNEL_MASK |    \
+               I40E_TX_IEEE1588_TMST)
 
 #define I40E_TX_OFFLOAD_NOTSUP_MASK \
                (PKT_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_MASK)
@@ -269,7 +277,7 @@ i40e_parse_tunneling_params(uint64_t ol_flags,
                *cd_tunneling |= I40E_TXD_CTX_GRE_TUNNELING;
                break;
        default:
-               PMD_TX_LOG(ERR, "Tunnel type not supported\n");
+               PMD_TX_LOG(ERR, "Tunnel type not supported");
                return;
        }
 
@@ -1026,7 +1034,6 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        uint16_t nb_tx;
        uint32_t td_cmd;
        uint32_t td_offset;
-       uint32_t tx_flags;
        uint32_t td_tag;
        uint64_t ol_flags;
        uint16_t nb_used;
@@ -1050,7 +1057,6 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                td_cmd = 0;
                td_tag = 0;
                td_offset = 0;
-               tx_flags = 0;
 
                tx_pkt = *tx_pkts++;
                RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
@@ -1097,12 +1103,9 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
                /* Descriptor based VLAN insertion */
                if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
-                       tx_flags |= tx_pkt->vlan_tci <<
-                               I40E_TX_FLAG_L2TAG1_SHIFT;
-                       tx_flags |= I40E_TX_FLAG_INSERT_VLAN;
                        td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
-                       td_tag = (tx_flags & I40E_TX_FLAG_L2TAG1_MASK) >>
-                                               I40E_TX_FLAG_L2TAG1_SHIFT;
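+                       /* vlan_tci is written as-is into L2TAG1 of the
+                        * data descriptor; no intermediate tx_flags needed
+                        */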
+                       td_tag = tx_pkt->vlan_tci;
                }
 
                /* Always enable CRC offload insertion */
@@ -1868,11 +1870,6 @@ i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        struct i40e_rx_queue *rxq;
        uint16_t desc = 0;
 
-       if (unlikely(rx_queue_id >= dev->data->nb_rx_queues)) {
-               PMD_DRV_LOG(ERR, "Invalid RX queue id %u", rx_queue_id);
-               return 0;
-       }
-
        rxq = dev->data->rx_queues[rx_queue_id];
        rxdp = &(rxq->rx_ring[rxq->rx_tail]);
        while ((desc < rxq->nb_rx_desc) &&
@@ -1920,6 +1917,77 @@ i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
        return ret;
 }
 
+int
+i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+       struct i40e_rx_queue *rxq = rx_queue;
+       volatile uint64_t *status;
+       uint64_t mask;
+       uint32_t desc;
+
+       if (unlikely(offset >= rxq->nb_rx_desc))
+               return -EINVAL;
+
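+       /* Only nb_rx_desc - nb_rx_hold descriptors ahead of rx_tail are
+        * currently owned by hardware; the rest were consumed by the driver
+        * but not yet returned to the ring, so they cannot become ready.
+        */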
+       if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+               return RTE_ETH_RX_DESC_UNAVAIL;
+
+       desc = rxq->rx_tail + offset;
+       if (desc >= rxq->nb_rx_desc)
+               desc -= rxq->nb_rx_desc;
+
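+       /* Test the Descriptor Done (DD) bit in the writeback status qword */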
+       status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
+       mask = rte_le_to_cpu_64((1ULL << I40E_RX_DESC_STATUS_DD_SHIFT)
+               << I40E_RXD_QW1_STATUS_SHIFT);
+       if (*status & mask)
+               return RTE_ETH_RX_DESC_DONE;
+
+       return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+       struct i40e_tx_queue *txq = tx_queue;
+       volatile uint64_t *status;
+       uint64_t mask, expect;
+       uint32_t desc;
+
+       if (unlikely(offset >= txq->nb_tx_desc))
+               return -EINVAL;
+
+       desc = txq->tx_tail + offset;
+       /* go to next desc that has the RS bit */
+       desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
+               txq->tx_rs_thresh;
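+       /* tx_tail + offset can be nearly 2 * nb_tx_desc, and rounding up to
+        * tx_rs_thresh may step past the ring end once more, so the index
+        * may need to wrap around twice.
+        */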
+       if (desc >= txq->nb_tx_desc) {
+               desc -= txq->nb_tx_desc;
+               if (desc >= txq->nb_tx_desc)
+                       desc -= txq->nb_tx_desc;
+       }
+
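+       /* Hardware rewrites DTYPE to DESC_DONE in the writeback once the
+        * RS-marked descriptor and all descriptors before it have completed.
+        */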
+       status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+       mask = rte_le_to_cpu_64(I40E_TXD_QW1_DTYPE_MASK);
+       expect = rte_cpu_to_le_64(
+               I40E_TX_DESC_DTYPE_DESC_DONE << I40E_TXD_QW1_DTYPE_SHIFT);
+       if ((*status & mask) == expect)
+               return RTE_ETH_TX_DESC_DONE;
+
+       return RTE_ETH_TX_DESC_FULL;
+}
+
 int
 i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
                        uint16_t queue_idx,
@@ -2201,11 +2256,11 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
        for (i = 0; i < len * sizeof(union i40e_rx_desc); i++)
                ((volatile char *)rxq->rx_ring)[i] = 0;
 
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
        memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
        for (i = 0; i < RTE_PMD_I40E_RX_MAX_BURST; ++i)
                rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
 
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
        rxq->rx_nb_avail = 0;
        rxq->rx_next_avail = 0;
        rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);