net/ionic: observe endianness in firmware commands
diff --git a/drivers/net/ionic/ionic_rxtx.c b/drivers/net/ionic/ionic_rxtx.c
index f3b46a2..61ad396 100644
@@ -31,7 +31,7 @@
 #include <rte_malloc.h>
 #include <rte_mbuf.h>
 #include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_prefetch.h>
 #include <rte_udp.h>
 #include <rte_tcp.h>
@@ -64,10 +64,10 @@ ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 
        qinfo->nb_desc = q->num_descs;
        qinfo->conf.offloads = txq->offloads;
-       qinfo->conf.tx_deferred_start = txq->deferred_start;
+       qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
 }
 
-static inline void __attribute__((cold))
+static inline void __rte_cold
 ionic_tx_flush(struct ionic_cq *cq)
 {
        struct ionic_queue *q = cq->bound_q;
@@ -118,25 +118,30 @@ ionic_tx_flush(struct ionic_cq *cq)
        }
 }
 
-void __attribute__((cold))
+void __rte_cold
 ionic_dev_tx_queue_release(void *tx_queue)
 {
        struct ionic_qcq *txq = (struct ionic_qcq *)tx_queue;
 
        IONIC_PRINT_CALL();
 
+       ionic_lif_txq_deinit(txq);
+
        ionic_qcq_free(txq);
 }
 
-int __attribute__((cold))
+int __rte_cold
 ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
 {
        struct ionic_qcq *txq;
 
-       IONIC_PRINT_CALL();
+       IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);
 
        txq = eth_dev->data->tx_queues[tx_queue_id];
 
+       eth_dev->data->tx_queue_state[tx_queue_id] =
+               RTE_ETH_QUEUE_STATE_STOPPED;
+
        /*
         * Note: it would be better to post a NOP Tx desc and wait for its
         * completion before disabling the Tx queue
@@ -146,17 +151,12 @@ ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
 
        ionic_tx_flush(&txq->cq);
 
-       ionic_lif_txq_deinit(txq);
-
-       eth_dev->data->tx_queue_state[tx_queue_id] =
-               RTE_ETH_QUEUE_STATE_STOPPED;
-
        return 0;
 }
 
-int __attribute__((cold))
+int __rte_cold
 ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
-               uint16_t nb_desc, uint32_t socket_id __rte_unused,
+               uint16_t nb_desc, uint32_t socket_id,
                const struct rte_eth_txconf *tx_conf)
 {
        struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
@@ -164,11 +164,6 @@ ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
        uint64_t offloads;
        int err;
 
-       IONIC_PRINT_CALL();
-
-       IONIC_PRINT(DEBUG, "Configuring TX queue %u with %u buffers",
-               tx_queue_id, nb_desc);
-
        if (tx_queue_id >= lif->ntxqcqs) {
                IONIC_PRINT(DEBUG, "Queue index %u not available "
                        "(max %u queues)",
@@ -177,6 +172,9 @@ ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
        }
 
        offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
+       IONIC_PRINT(DEBUG,
+               "Configuring skt %u TX queue %u with %u buffers, offloads %jx",
+               socket_id, tx_queue_id, nb_desc, offloads);
 
        /* Validate number of transmit descriptors */
        if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
@@ -189,6 +187,9 @@ ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
                eth_dev->data->tx_queues[tx_queue_id] = NULL;
        }
 
+       eth_dev->data->tx_queue_state[tx_queue_id] =
+               RTE_ETH_QUEUE_STATE_STOPPED;
+
        err = ionic_tx_qcq_alloc(lif, tx_queue_id, nb_desc, &txq);
        if (err) {
                IONIC_PRINT(DEBUG, "Queue allocation failure");
@@ -196,7 +197,8 @@ ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
        }
 
        /* Do not start queue with rte_eth_dev_start() */
-       txq->deferred_start = tx_conf->tx_deferred_start;
+       if (tx_conf->tx_deferred_start)
+               txq->flags |= IONIC_QCQ_F_DEFERRED;
 
        txq->offloads = offloads;
 
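For reference, IONIC_QCQ_F_DEFERRED plugs into the standard ethdev deferred-start contract: a deferred queue is skipped by rte_eth_dev_start() and must be started explicitly. A minimal application-side sketch (the helper name, port_id, and descriptor count are illustrative, not part of this patch):

#include <rte_ethdev.h>
#include <rte_lcore.h>

/* Illustrative only: configure a TX queue with deferred start (which makes
 * ionic_dev_tx_queue_setup() set IONIC_QCQ_F_DEFERRED), then start it by
 * hand once the port is up. */
static int
start_deferred_txq(uint16_t port_id, uint16_t qid, uint16_t nb_desc)
{
        struct rte_eth_txconf txconf = { .tx_deferred_start = 1 };
        int ret;

        ret = rte_eth_tx_queue_setup(port_id, qid, nb_desc,
                        rte_socket_id(), &txconf);
        if (ret != 0)
                return ret;

        ret = rte_eth_dev_start(port_id);       /* queue qid stays stopped */
        if (ret != 0)
                return ret;

        /* Explicit start; ends up in ionic_dev_tx_queue_start() above. */
        return rte_eth_dev_tx_queue_start(port_id, qid);
}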
@@ -208,38 +210,90 @@ ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
 /*
  * Start Transmit Units for specified queue.
  */
-int __attribute__((cold))
+int __rte_cold
 ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
 {
+       uint8_t *tx_queue_state = eth_dev->data->tx_queue_state;
        struct ionic_qcq *txq;
        int err;
 
-       IONIC_PRINT_CALL();
+       if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
+               IONIC_PRINT(DEBUG, "TX queue %u already started",
+                       tx_queue_id);
+               return 0;
+       }
 
        txq = eth_dev->data->tx_queues[tx_queue_id];
 
-       err = ionic_lif_txq_init(txq);
-       if (err)
-               return err;
+       IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs",
+               tx_queue_id, txq->q.num_descs);
 
-       ionic_qcq_enable(txq);
+       if (!(txq->flags & IONIC_QCQ_F_INITED)) {
+               err = ionic_lif_txq_init(txq);
+               if (err)
+                       return err;
+       } else {
+               ionic_qcq_enable(txq);
+       }
 
-       eth_dev->data->tx_queue_state[tx_queue_id] =
-               RTE_ETH_QUEUE_STATE_STARTED;
+       tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
 
        return 0;
 }
 
+static void
+ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
+{
+       struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct rte_ether_hdr *);
+       char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
+       struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
+               (l3_hdr + txm->l3_len);
+
+       if (txm->ol_flags & PKT_TX_IP_CKSUM) {
+               struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
+               ipv4_hdr->hdr_checksum = 0;
+               tcp_hdr->cksum = 0;
+               tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
+       } else {
+               struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
+               tcp_hdr->cksum = 0;
+               tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
+       }
+}
+
+static void
+ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
+{
+       struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct rte_ether_hdr *);
+       char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
+               txm->outer_l3_len + txm->l2_len;
+       struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
+               (l3_hdr + txm->l3_len);
+
+       if (txm->ol_flags & PKT_TX_IPV4) {
+               struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
+               ipv4_hdr->hdr_checksum = 0;
+               tcp_hdr->cksum = 0;
+               tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
+       } else {
+               struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
+               tcp_hdr->cksum = 0;
+               tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
+       }
+}
+
 static void
 ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
                struct rte_mbuf *txm,
                rte_iova_t addr, uint8_t nsge, uint16_t len,
                uint32_t hdrlen, uint32_t mss,
+               bool encap,
                uint16_t vlan_tci, bool has_vlan,
                bool start, bool done)
 {
        uint8_t flags = 0;
        flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
+       flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
        flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
        flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;
 
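The two helpers above implement the classic TSO preparation step: zero the checksum fields, then seed the TCP checksum so the hardware only has to fold in the per-segment length and payload (see the "Preload inner-most TCP csum" comment in the next hunk). As a point of comparison, a minimal sketch using the rte_ip.h pseudo-header helper, which omits the payload length outright when PKT_TX_TCP_SEG is set; the function name and the non-tunneled layout are assumptions, not the driver's code:

#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_tcp.h>

/* Sketch of the same preload via rte_ipv4_phdr_cksum(); assumes a plain
 * (non-tunneled) IPv4/TCP packet with valid l2_len/l3_len. */
static void
preload_tcp_phdr_cksum(struct rte_mbuf *m)
{
        struct rte_ipv4_hdr *ip = rte_pktmbuf_mtod_offset(m,
                        struct rte_ipv4_hdr *, m->l2_len);
        struct rte_tcp_hdr *tcp = rte_pktmbuf_mtod_offset(m,
                        struct rte_tcp_hdr *, m->l2_len + m->l3_len);

        ip->hdr_checksum = 0;           /* recomputed by hardware */
        tcp->cksum = rte_ipv4_phdr_cksum(ip, m->ol_flags);
}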
@@ -284,10 +338,29 @@ ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm,
        uint32_t len;
        uint32_t offset = 0;
        bool start, done;
+       bool encap;
        bool has_vlan = !!(txm->ol_flags & PKT_TX_VLAN_PKT);
        uint16_t vlan_tci = txm->vlan_tci;
+       uint64_t ol_flags = txm->ol_flags;
 
-       hdrlen = txm->l2_len + txm->l3_len;
+       encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
+               (ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
+               ((ol_flags & PKT_TX_OUTER_IPV4) ||
+               (ol_flags & PKT_TX_OUTER_IPV6));
+
+       /* Preload inner-most TCP csum field with IP pseudo hdr
+        * calculated with IP length set to zero.  HW will later
+        * add in length to each TCP segment resulting from the TSO.
+        */
+
+       if (encap) {
+               ionic_tx_tcp_inner_pseudo_csum(txm);
+               hdrlen = txm->outer_l2_len + txm->outer_l3_len +
+                       txm->l2_len + txm->l3_len + txm->l4_len;
+       } else {
+               ionic_tx_tcp_pseudo_csum(txm);
+               hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
+       }
 
        seglen = hdrlen + mss;
        left = txm->data_len;
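hdrlen now counts every byte replicated in front of each TSO segment, including the TCP header (l4_len) and, for encapsulated traffic, the outer headers. A worked example with illustrative VXLAN-in-IPv4 values (example numbers only, not from this patch):

/* Illustrative VXLAN-in-IPv4 TSO header math:
 *   outer_l2_len = 14                 outer Ethernet
 *   outer_l3_len = 20                 outer IPv4
 *   l2_len       = 8 + 8 + 14 = 30    outer UDP + VXLAN + inner Ethernet
 *   l3_len       = 20                 inner IPv4
 *   l4_len       = 20                 inner TCP
 * hdrlen = 14 + 20 + 30 + 20 + 20 = 104, so with mss = 1448 the first
 * chunk boundary falls at seglen = hdrlen + mss = 1552 bytes, as
 * computed above.
 */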
@@ -311,6 +384,7 @@ ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm,
                ionic_tx_tso_post(q, desc, txm,
                        desc_addr, desc_nsge, desc_len,
                        hdrlen, mss,
+                       encap,
                        vlan_tci, has_vlan,
                        start, done && not_xmit_more);
                desc = ionic_tx_tso_next(q, &elem);
@@ -352,6 +426,7 @@ ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm,
                        ionic_tx_tso_post(q, desc, txm_seg,
                                desc_addr, desc_nsge, desc_len,
                                hdrlen, mss,
+                               encap,
                                vlan_tci, has_vlan,
                                start, done && not_xmit_more);
                        desc = ionic_tx_tso_next(q, &elem);
@@ -368,7 +443,7 @@ ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm,
 
 static int
 ionic_tx(struct ionic_queue *q, struct rte_mbuf *txm,
-               uint64_t offloads __rte_unused, bool not_xmit_more)
+               uint64_t offloads, bool not_xmit_more)
 {
        struct ionic_txq_desc *desc_base = q->base;
        struct ionic_txq_sg_desc *sg_desc_base = q->sg_base;
@@ -377,15 +452,34 @@ ionic_tx(struct ionic_queue *q, struct rte_mbuf *txm,
        struct ionic_txq_sg_elem *elem = sg_desc->elems;
        struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
        struct rte_mbuf *txm_seg;
+       bool encap;
        bool has_vlan;
        uint64_t ol_flags = txm->ol_flags;
        uint64_t addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm));
        uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
        uint8_t flags = 0;
 
+       if ((ol_flags & PKT_TX_IP_CKSUM) &&
+                       (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) {
+               opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
+               flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
+               if (((ol_flags & PKT_TX_TCP_CKSUM) &&
+                               (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) ||
+                               ((ol_flags & PKT_TX_UDP_CKSUM) &&
+                               (offloads & DEV_TX_OFFLOAD_UDP_CKSUM)))
+                       flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
+       } else {
+               stats->no_csum++;
+       }
+
        has_vlan = (ol_flags & PKT_TX_VLAN_PKT);
+       encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
+                       (ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
+                       ((ol_flags & PKT_TX_OUTER_IPV4) ||
+                       (ol_flags & PKT_TX_OUTER_IPV6));
 
        flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
+       flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
 
        desc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
        desc->len = txm->data_len;
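The opcode selection above only engages CSUM_HW when the per-packet ol_flags and the negotiated queue offloads agree. A sketch of the sender side that satisfies both conditions; the helper name is hypothetical, and the port is assumed to have DEV_TX_OFFLOAD_IPV4_CKSUM and DEV_TX_OFFLOAD_TCP_CKSUM enabled in txmode.offloads:

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

/* Illustrative per-mbuf setup for the CSUM_HW path above; the header
 * lengths must match the actual frame layout. */
static void
request_tx_csum(struct rte_mbuf *m)
{
        m->l2_len = sizeof(struct rte_ether_hdr);       /* 14 */
        m->l3_len = sizeof(struct rte_ipv4_hdr);        /* 20, no options */
        m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
}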
@@ -469,6 +563,7 @@ ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        PKT_TX_IPV4 |           \
        PKT_TX_IPV6 |           \
        PKT_TX_VLAN |           \
+       PKT_TX_IP_CKSUM |       \
        PKT_TX_TCP_SEG |        \
        PKT_TX_L4_MASK)
 
@@ -521,11 +616,11 @@ ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        qinfo->mp = rxq->mb_pool;
        qinfo->scattered_rx = dev->data->scattered_rx;
        qinfo->nb_desc = q->num_descs;
-       qinfo->conf.rx_deferred_start = rxq->deferred_start;
+       qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
        qinfo->conf.offloads = rxq->offloads;
 }
 
-static void __attribute__((cold))
+static void __rte_cold
 ionic_rx_empty(struct ionic_queue *q)
 {
        struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q);
@@ -541,7 +636,7 @@ ionic_rx_empty(struct ionic_queue *q)
        }
 }
 
-void __attribute__((cold))
+void __rte_cold
 ionic_dev_rx_queue_release(void *rx_queue)
 {
        struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue;
@@ -550,14 +645,16 @@ ionic_dev_rx_queue_release(void *rx_queue)
 
        ionic_rx_empty(&rxq->q);
 
+       ionic_lif_rxq_deinit(rxq);
+
        ionic_qcq_free(rxq);
 }
 
-int __attribute__((cold))
+int __rte_cold
 ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
                uint16_t rx_queue_id,
                uint16_t nb_desc,
-               uint32_t socket_id __rte_unused,
+               uint32_t socket_id,
                const struct rte_eth_rxconf *rx_conf,
                struct rte_mempool *mp)
 {
@@ -566,11 +663,6 @@ ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
        uint64_t offloads;
        int err;
 
-       IONIC_PRINT_CALL();
-
-       IONIC_PRINT(DEBUG, "Configuring RX queue %u with %u buffers",
-               rx_queue_id, nb_desc);
-
        if (rx_queue_id >= lif->nrxqcqs) {
                IONIC_PRINT(ERR,
                        "Queue index %u not available (max %u queues)",
@@ -579,20 +671,23 @@ ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
        }
 
        offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
+       IONIC_PRINT(DEBUG,
+               "Configuring skt %u RX queue %u with %u buffers, offloads %jx",
+               socket_id, rx_queue_id, nb_desc, offloads);
+
+       if (!rx_conf->rx_drop_en)
+               IONIC_PRINT(WARNING, "No-drop mode is not supported");
 
        /* Validate number of receive descriptors */
        if (!rte_is_power_of_2(nb_desc) ||
                        nb_desc < IONIC_MIN_RING_DESC ||
                        nb_desc > IONIC_MAX_RING_DESC) {
                IONIC_PRINT(ERR,
-                       "Bad number of descriptors (%u) for queue %u (min: %u)",
+                       "Bad descriptor count (%u) for queue %u (min: %u)",
                        nb_desc, rx_queue_id, IONIC_MIN_RING_DESC);
                return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
        }
 
-       if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
-               eth_dev->data->scattered_rx = 1;
-
        /* Free memory prior to re-allocation if needed... */
        if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
                void *rx_queue = eth_dev->data->rx_queues[rx_queue_id];
@@ -600,9 +695,12 @@ ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
                eth_dev->data->rx_queues[rx_queue_id] = NULL;
        }
 
+       eth_dev->data->rx_queue_state[rx_queue_id] =
+               RTE_ETH_QUEUE_STATE_STOPPED;
+
        err = ionic_rx_qcq_alloc(lif, rx_queue_id, nb_desc, &rxq);
        if (err) {
-               IONIC_PRINT(ERR, "Queue allocation failure");
+               IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
                return -EINVAL;
        }
 
@@ -619,7 +717,8 @@ ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
         */
 
        /* Do not start queue with rte_eth_dev_start() */
-       rxq->deferred_start = rx_conf->rx_deferred_start;
+       if (rx_conf->rx_deferred_start)
+               rxq->flags |= IONIC_QCQ_F_DEFERRED;
 
        rxq->offloads = offloads;
 
@@ -700,6 +799,10 @@ ionic_rx_clean(struct ionic_queue *q,
                rxm->nb_segs++;
        }
 
+       /* RSS */
+       pkt_flags |= PKT_RX_RSS_HASH;
+       rxm->hash.rss = cq_desc->rss_hash;
+
        /* Vlan Strip */
        if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
                pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
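With the block above, every completed descriptor now reports the hardware RSS hash in mbuf->hash.rss. A minimal consumer sketch (the function name is illustrative); testing PKT_RX_RSS_HASH first keeps it portable to PMDs that do not always set the hash:

#include <rte_mbuf.h>

/* Illustrative: pick an application worker from the NIC-computed RSS hash. */
static inline uint32_t
rx_worker_for(const struct rte_mbuf *m, uint32_t nb_workers)
{
        uint32_t hash = (m->ol_flags & PKT_RX_RSS_HASH) ? m->hash.rss : 0;

        return hash % nb_workers;
}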
@@ -785,7 +888,7 @@ ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
        ionic_q_post(q, true, ionic_rx_clean, mbuf);
 }
 
-static int __attribute__((cold))
+static int __rte_cold
 ionic_rx_fill(struct ionic_qcq *rxq, uint32_t len)
 {
        struct ionic_queue *q = &rxq->q;
@@ -862,23 +965,32 @@ ionic_rx_fill(struct ionic_qcq *rxq, uint32_t len)
 /*
  * Start Receive Units for specified queue.
  */
-int __attribute__((cold))
+int __rte_cold
 ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 {
        uint32_t frame_size = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+       uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
        struct ionic_qcq *rxq;
        int err;
 
-       IONIC_PRINT_CALL();
-
-       IONIC_PRINT(DEBUG, "Allocating RX queue buffers (size: %u)",
-               frame_size);
+       if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
+               IONIC_PRINT(DEBUG, "RX queue %u already started",
+                       rx_queue_id);
+               return 0;
+       }
 
        rxq = eth_dev->data->rx_queues[rx_queue_id];
 
-       err = ionic_lif_rxq_init(rxq);
-       if (err)
-               return err;
+       IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs (size: %u)",
+               rx_queue_id, rxq->q.num_descs, frame_size);
+
+       if (!(rxq->flags & IONIC_QCQ_F_INITED)) {
+               err = ionic_lif_rxq_init(rxq);
+               if (err)
+                       return err;
+       } else {
+               ionic_qcq_enable(rxq);
+       }
 
        /* Allocate buffers for descriptor rings */
        if (ionic_rx_fill(rxq, frame_size) != 0) {
@@ -887,15 +999,12 @@ ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
                return -1;
        }
 
-       ionic_qcq_enable(rxq);
-
-       eth_dev->data->rx_queue_state[rx_queue_id] =
-               RTE_ETH_QUEUE_STATE_STARTED;
+       rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
 
        return 0;
 }
 
-static inline void __attribute__((cold))
+static inline void __rte_cold
 ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do,
                void *service_cb_arg)
 {
@@ -950,25 +1059,23 @@ ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do,
 /*
  * Stop Receive Units for specified queue.
  */
-int __attribute__((cold))
+int __rte_cold
 ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 {
        struct ionic_qcq *rxq;
 
-       IONIC_PRINT_CALL();
+       IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id);
 
        rxq = eth_dev->data->rx_queues[rx_queue_id];
 
+       eth_dev->data->rx_queue_state[rx_queue_id] =
+               RTE_ETH_QUEUE_STATE_STOPPED;
+
        ionic_qcq_disable(rxq);
 
        /* Flush */
        ionic_rxq_service(&rxq->cq, -1, NULL);
 
-       ionic_lif_rxq_deinit(rxq);
-
-       eth_dev->data->rx_queue_state[rx_queue_id] =
-               RTE_ETH_QUEUE_STATE_STOPPED;
-
        return 0;
 }
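Taken together, moving the LIF queue deinit into the release callbacks and tracking RTE_ETH_QUEUE_STATE_* in setup/start/stop means a queue can now be stopped and restarted at runtime without being torn down: a restart finds IONIC_QCQ_F_INITED set and only re-enables the qcq. A minimal sketch of that sequence from the application (function name illustrative):

#include <rte_ethdev.h>

/* Illustrative: pause and resume one RX queue on a running port. */
static int
cycle_rx_queue(uint16_t port_id, uint16_t qid)
{
        int ret;

        ret = rte_eth_dev_rx_queue_stop(port_id, qid); /* flushed, stays inited */
        if (ret != 0)
                return ret;

        return rte_eth_dev_rx_queue_start(port_id, qid); /* re-enables the qcq */
}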