fix typos using codespell utility
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index d98c3f6..22a168c 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -388,7 +388,7 @@ static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
        struct rte_pktmbuf_pool_private *mbp_priv;
        u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.jumbo_frame;
 
-       /* Use jumbo mtu buffers iff mbuf data room size can fit jumbo data. */
+       /* Use jumbo mtu buffers if mbuf data room size can fit jumbo data. */
        mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
        if (jumbo_en &&
            ((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000))
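Aside on the hunk above: the free list only switches to jumbo-sized buffers when the mbufs coming out of the pool can actually hold a 9000-byte payload once RTE_PKTMBUF_HEADROOM is reserved. A minimal sketch of the same test, using the stock rte_pktmbuf_data_room_size() accessor for mbuf_data_room_size; the wrapper name is made up for illustration:

	#include <stdbool.h>
	#include <rte_mbuf.h>

	/* Sketch only: can this pool's mbufs carry a 9000-byte jumbo frame
	 * after the standard headroom is reserved?  Mirrors the check in
	 * refill_fl_usembufs(); pool_fits_jumbo() is a hypothetical name.
	 */
	static bool pool_fits_jumbo(struct rte_mempool *mp)
	{
		uint16_t room = rte_pktmbuf_data_room_size(mp);

		return (room - RTE_PKTMBUF_HEADROOM) >= 9000;
	}
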
@@ -593,7 +593,7 @@ static inline unsigned int calc_tx_flits(const struct rte_mbuf *m)
         * Write Header (incorporated as part of the cpl_tx_pkt_lso and
         * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
         * message or, if we're doing a Large Send Offload, an LSO CPL message
-        * with an embeded TX Packet Write CPL message.
+        * with an embedded TX Packet Write CPL message.
         */
        flits = sgl_len(m->nb_segs);
        if (m->tso_segsz)
@@ -683,6 +683,10 @@ static void write_sgl(struct rte_mbuf *mbuf, struct sge_txq *q,
 #define Q_IDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->size)
 #define R_IDXDIFF(q, idx) IDXDIFF((q)->cidx, (q)->idx, (q)->size)
 
+#define PIDXDIFF(head, tail, wrap) \
+       ((tail) >= (head) ? (tail) - (head) : (wrap) - (head) + (tail))
+#define P_IDXDIFF(q, idx) PIDXDIFF((q)->cidx, idx, (q)->size)
+
 /**
  * ring_tx_db - ring a Tx queue's doorbell
  * @adap: the adapter
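The PIDXDIFF()/P_IDXDIFF() helpers added above compute how far a producer index (tail) has run ahead of a consumer index (head) on a ring of the given size, accounting for wrap-around. A small stand-alone illustration of the same arithmetic; the names here are made up, only the formula is taken from the macro:

	#include <assert.h>

	/* Distance from head to tail on a ring that wraps at 'wrap' entries;
	 * same formula as the PIDXDIFF() macro above.
	 */
	static unsigned int ring_dist(unsigned int head, unsigned int tail,
				      unsigned int wrap)
	{
		return tail >= head ? tail - head : wrap - head + tail;
	}

	int main(void)
	{
		assert(ring_dist(4, 10, 64) == 6);  /* no wrap: 10 - 4 */
		assert(ring_dist(60, 2, 64) == 6);  /* wrapped: (64 - 60) + 2 */
		assert(ring_dist(7, 7, 64) == 0);   /* caught up, nothing pending */
		return 0;
	}
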
@@ -1461,74 +1465,101 @@ static int process_responses(struct sge_rspq *q, int budget,
                rsp_type = G_RSPD_TYPE(rc->u.type_gen);
 
                if (likely(rsp_type == X_RSPD_TYPE_FLBUF)) {
-                       const struct rx_sw_desc *rsd =
-                                               &rxq->fl.sdesc[rxq->fl.cidx];
-                       const struct rss_header *rss_hdr =
-                                               (const void *)q->cur_desc;
-                       const struct cpl_rx_pkt *cpl =
-                                               (const void *)&q->cur_desc[1];
-                       struct rte_mbuf *pkt, *npkt;
-                       u32 len, bufsz;
-                       bool csum_ok;
-                       u16 err_vec;
-
-                       len = ntohl(rc->pldbuflen_qid);
-                       BUG_ON(!(len & F_RSPD_NEWBUF));
-                       pkt = rsd->buf;
-                       npkt = pkt;
-                       len = G_RSPD_LEN(len);
-                       pkt->pkt_len = len;
-
-                       /* Compressed error vector is enabled for
-                        * T6 only
-                        */
-                       if (q->adapter->params.tp.rx_pkt_encap)
-                               err_vec = G_T6_COMPR_RXERR_VEC(
-                                               ntohs(cpl->err_vec));
-                       else
-                               err_vec = ntohs(cpl->err_vec);
-                       csum_ok = cpl->csum_calc && !err_vec;
-
-                       /* Chain mbufs into len if necessary */
-                       while (len) {
-                               struct rte_mbuf *new_pkt = rsd->buf;
-
-                               bufsz = min(get_buf_size(q->adapter, rsd), len);
-                               new_pkt->data_len = bufsz;
-                               unmap_rx_buf(&rxq->fl);
-                               len -= bufsz;
-                               npkt->next = new_pkt;
-                               npkt = new_pkt;
-                               pkt->nb_segs++;
-                               rsd = &rxq->fl.sdesc[rxq->fl.cidx];
-                       }
-                       npkt->next = NULL;
-                       pkt->nb_segs--;
-
-                       if (cpl->l2info & htonl(F_RXF_IP)) {
-                               pkt->packet_type = RTE_PTYPE_L3_IPV4;
-                               if (unlikely(!csum_ok))
-                                       pkt->ol_flags |= PKT_RX_IP_CKSUM_BAD;
-
-                               if ((cpl->l2info &
-                                    htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok)
-                                       pkt->ol_flags |= PKT_RX_L4_CKSUM_BAD;
-                       } else if (cpl->l2info & htonl(F_RXF_IP6)) {
-                               pkt->packet_type = RTE_PTYPE_L3_IPV6;
-                       }
+                       unsigned int stat_pidx;
+                       int stat_pidx_diff;
+
+                       stat_pidx = ntohs(q->stat->pidx);
+                       stat_pidx_diff = P_IDXDIFF(q, stat_pidx);
+                       while (stat_pidx_diff && budget_left) {
+                               const struct rx_sw_desc *rsd =
+                                       &rxq->fl.sdesc[rxq->fl.cidx];
+                               const struct rss_header *rss_hdr =
+                                       (const void *)q->cur_desc;
+                               const struct cpl_rx_pkt *cpl =
+                                       (const void *)&q->cur_desc[1];
+                               struct rte_mbuf *pkt, *npkt;
+                               u32 len, bufsz;
+                               bool csum_ok;
+                               u16 err_vec;
+
+                               rc = (const struct rsp_ctrl *)
+                                    ((const char *)q->cur_desc +
+                                     (q->iqe_len - sizeof(*rc)));
+
+                               rsp_type = G_RSPD_TYPE(rc->u.type_gen);
+                               if (unlikely(rsp_type != X_RSPD_TYPE_FLBUF))
+                                       break;
+
+                               len = ntohl(rc->pldbuflen_qid);
+                               BUG_ON(!(len & F_RSPD_NEWBUF));
+                               pkt = rsd->buf;
+                               npkt = pkt;
+                               len = G_RSPD_LEN(len);
+                               pkt->pkt_len = len;
+
+                               /* Compressed error vector is enabled for
+                                * T6 only
+                                */
+                               if (q->adapter->params.tp.rx_pkt_encap)
+                                       err_vec = G_T6_COMPR_RXERR_VEC(
+                                                       ntohs(cpl->err_vec));
+                               else
+                                       err_vec = ntohs(cpl->err_vec);
+                               csum_ok = cpl->csum_calc && !err_vec;
+
+                               /* Chain mbufs into len if necessary */
+                               while (len) {
+                                       struct rte_mbuf *new_pkt = rsd->buf;
+
+                                       bufsz = min(get_buf_size(q->adapter,
+                                                                rsd), len);
+                                       new_pkt->data_len = bufsz;
+                                       unmap_rx_buf(&rxq->fl);
+                                       len -= bufsz;
+                                       npkt->next = new_pkt;
+                                       npkt = new_pkt;
+                                       pkt->nb_segs++;
+                                       rsd = &rxq->fl.sdesc[rxq->fl.cidx];
+                               }
+                               npkt->next = NULL;
+                               pkt->nb_segs--;
+
+                               if (cpl->l2info & htonl(F_RXF_IP)) {
+                                       pkt->packet_type = RTE_PTYPE_L3_IPV4;
+                                       if (unlikely(!csum_ok))
+                                               pkt->ol_flags |=
+                                                       PKT_RX_IP_CKSUM_BAD;
+
+                                       if ((cpl->l2info &
+                                            htonl(F_RXF_UDP | F_RXF_TCP)) &&
+                                           !csum_ok)
+                                               pkt->ol_flags |=
+                                                       PKT_RX_L4_CKSUM_BAD;
+                               } else if (cpl->l2info & htonl(F_RXF_IP6)) {
+                                       pkt->packet_type = RTE_PTYPE_L3_IPV6;
+                               }
 
-                       if (!rss_hdr->filter_tid && rss_hdr->hash_type) {
-                               pkt->ol_flags |= PKT_RX_RSS_HASH;
-                               pkt->hash.rss = ntohl(rss_hdr->hash_val);
-                       }
+                               if (!rss_hdr->filter_tid &&
+                                   rss_hdr->hash_type) {
+                                       pkt->ol_flags |= PKT_RX_RSS_HASH;
+                                       pkt->hash.rss =
+                                               ntohl(rss_hdr->hash_val);
+                               }
+
+                               if (cpl->vlan_ex) {
+                                       pkt->ol_flags |= PKT_RX_VLAN_PKT;
+                                       pkt->vlan_tci = ntohs(cpl->vlan);
+                               }
+
+                               rxq->stats.pkts++;
+                               rxq->stats.rx_bytes += pkt->pkt_len;
+                               rx_pkts[budget - budget_left] = pkt;
 
-                       if (cpl->vlan_ex) {
-                               pkt->ol_flags |= PKT_RX_VLAN_PKT;
-                               pkt->vlan_tci = ntohs(cpl->vlan);
+                               rspq_next(q);
+                               budget_left--;
+                               stat_pidx_diff--;
                        }
-                       rxq->stats.pkts++;
-                       rxq->stats.rx_bytes += pkt->pkt_len;
-                       rx_pkts[budget - budget_left] = pkt;
+                       continue;
                } else if (likely(rsp_type == X_RSPD_TYPE_CPL)) {
                        ret = q->handler(q, q->cur_desc, NULL);
                } else {
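Note on the rework above: instead of handling one free-list completion per pass of the outer loop, the inner while now snapshots the producer index the hardware writes to the queue's status page (q->stat->pidx), computes the backlog with P_IDXDIFF(), and drains entries until either that backlog or the caller's budget runs out. A rough, self-contained sketch of this drain-up-to-budget pattern; all names are illustrative, none are the driver's:

	/* Sketch: budget-bounded draining of a ring whose producer index
	 * is published by hardware in a status word.
	 */
	struct demo_ring {
		unsigned int cidx;          /* consumer (software) index    */
		unsigned int size;          /* number of entries in ring    */
		volatile unsigned int pidx; /* producer index written by HW */
	};

	static unsigned int drain_ring(struct demo_ring *r, unsigned int budget,
				       void (*consume)(unsigned int idx))
	{
		unsigned int pidx = r->pidx;            /* snapshot HW index */
		unsigned int avail = pidx >= r->cidx ?
				     pidx - r->cidx : r->size - r->cidx + pidx;
		unsigned int done = 0;

		while (avail && budget) {
			consume(r->cidx);               /* process one entry */
			if (++r->cidx == r->size)       /* advance with wrap */
				r->cidx = 0;
			avail--;
			budget--;
			done++;
		}
		return done;
	}
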
@@ -1660,7 +1691,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
        iq->size = cxgbe_roundup(iq->size, 16);
 
        snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-                eth_dev->data->drv_name,
+                eth_dev->device->driver->name,
                 fwevtq ? "fwq_ring" : "rx_ring",
                 eth_dev->data->port_id, queue_id);
        snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
@@ -1714,7 +1745,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
                fl->size = cxgbe_roundup(fl->size, 8);
 
                snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-                        eth_dev->data->drv_name,
+                        eth_dev->device->driver->name,
                         fwevtq ? "fwq_ring" : "fl_ring",
                         eth_dev->data->port_id, queue_id);
                snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
@@ -1914,7 +1945,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
        nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
 
        snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-                eth_dev->data->drv_name, "tx_ring",
+                eth_dev->device->driver->name, "tx_ring",
                 eth_dev->data->port_id, queue_id);
        snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
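On the memzone naming touched in the last three hunks: the name is simply the driver name, a ring tag, the port id and the queue id joined by underscores, plus an "_sw_ring" suffix for the shadow software ring. A throwaway sketch of the resulting strings, assuming the cxgbe PMD's driver name is "net_cxgbe" (an assumption for illustration, not taken from this patch):

	#include <stdio.h>

	/* Sketch: how the memzone names above are composed. */
	int main(void)
	{
		char z_name[64];
		char z_name_sw[64];

		snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
			 "net_cxgbe" /* assumed driver name */, "tx_ring",
			 0 /* port_id */, 0 /* queue_id */);
		snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);

		/* prints net_cxgbe_tx_ring_0_0 and net_cxgbe_tx_ring_0_0_sw_ring */
		printf("%s\n%s\n", z_name, z_name_sw);
		return 0;
	}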