X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fionic%2Fionic_rxtx.c;h=2592f5cab6408c74376e7401ab182e24eee71b66;hb=585cacc67faadb1effe1cc47e817cbd380b8d354;hp=f3b46a262fa70601bfcd55abe5999b1c79a16092;hpb=a27d901331da7a0d6959cb2b3a90a017f2463103;p=dpdk.git

diff --git a/drivers/net/ionic/ionic_rxtx.c b/drivers/net/ionic/ionic_rxtx.c
index f3b46a262f..2592f5cab6 100644
--- a/drivers/net/ionic/ionic_rxtx.c
+++ b/drivers/net/ionic/ionic_rxtx.c
@@ -67,7 +67,7 @@ ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.tx_deferred_start = txq->deferred_start;
 }
 
-static inline void __attribute__((cold))
+static inline void __rte_cold
 ionic_tx_flush(struct ionic_cq *cq)
 {
 	struct ionic_queue *q = cq->bound_q;
@@ -118,7 +118,7 @@ ionic_tx_flush(struct ionic_cq *cq)
 	}
 }
 
-void __attribute__((cold))
+void __rte_cold
 ionic_dev_tx_queue_release(void *tx_queue)
 {
 	struct ionic_qcq *txq = (struct ionic_qcq *)tx_queue;
@@ -128,7 +128,7 @@ ionic_dev_tx_queue_release(void *tx_queue)
 	ionic_qcq_free(txq);
 }
 
-int __attribute__((cold))
+int __rte_cold
 ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
 {
 	struct ionic_qcq *txq;
@@ -154,7 +154,7 @@ ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
 	return 0;
 }
 
-int __attribute__((cold))
+int __rte_cold
 ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
 		uint16_t nb_desc, uint32_t socket_id __rte_unused,
 		const struct rte_eth_txconf *tx_conf)
@@ -208,7 +208,7 @@ ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
 /*
  * Start Transmit Units for specified queue.
  */
-int __attribute__((cold))
+int __rte_cold
 ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
 {
 	struct ionic_qcq *txq;
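The __attribute__((cold)) to __rte_cold conversions above swap the raw GCC attribute for DPDK's portable wrapper from <rte_common.h>. A minimal sketch of the pattern, assuming a GCC- or clang-compatible compiler; the function below is hypothetical and not part of this patch:

#include <rte_common.h>	/* provides __rte_cold */

/* Hypothetical slow-path helper: __rte_cold expands to
 * __attribute__((cold)) on GCC/clang, hinting that calls are rare so
 * the compiler can place the body away from the hot code path.
 */
static void __rte_cold
example_queue_teardown(void)
{
	/* control-path-only work goes here */
}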
@@ -230,16 +230,59 @@ ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
 	return 0;
 }
 
+static void
+ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
+{
+	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *);
+	char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
+	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
+		(l3_hdr + txm->l3_len);
+
+	if (txm->ol_flags & PKT_TX_IP_CKSUM) {
+		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
+		ipv4_hdr->hdr_checksum = 0;
+		tcp_hdr->cksum = 0;
+		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
+	} else {
+		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
+		tcp_hdr->cksum = 0;
+		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
+	}
+}
+
+static void
+ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
+{
+	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *);
+	char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
+		txm->outer_l3_len + txm->l2_len;
+	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
+		(l3_hdr + txm->l3_len);
+
+	if (txm->ol_flags & PKT_TX_IPV4) {
+		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
+		ipv4_hdr->hdr_checksum = 0;
+		tcp_hdr->cksum = 0;
+		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
+	} else {
+		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
+		tcp_hdr->cksum = 0;
+		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
+	}
+}
+
 static void
 ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
 		struct rte_mbuf *txm,
 		rte_iova_t addr, uint8_t nsge, uint16_t len,
 		uint32_t hdrlen, uint32_t mss,
+		bool encap,
 		uint16_t vlan_tci, bool has_vlan,
 		bool start, bool done)
 {
 	uint8_t flags = 0;
 	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
+	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
 	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
 	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;
 
@@ -284,10 +327,29 @@ ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm,
 	uint32_t len;
 	uint32_t offset = 0;
 	bool start, done;
+	bool encap;
 	bool has_vlan = !!(txm->ol_flags & PKT_TX_VLAN_PKT);
 	uint16_t vlan_tci = txm->vlan_tci;
+	uint64_t ol_flags = txm->ol_flags;
+
+	encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
+		(ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
+		((ol_flags & PKT_TX_OUTER_IPV4) ||
+		(ol_flags & PKT_TX_OUTER_IPV6));
 
-	hdrlen = txm->l2_len + txm->l3_len;
+	/* Preload inner-most TCP csum field with IP pseudo hdr
+	 * calculated with IP length set to zero.  HW will later
+	 * add in length to each TCP segment resulting from the TSO.
+	 */
+
+	if (encap) {
+		ionic_tx_tcp_inner_pseudo_csum(txm);
+		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
+			txm->l2_len + txm->l3_len + txm->l4_len;
+	} else {
+		ionic_tx_tcp_pseudo_csum(txm);
+		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
+	}
 
 	seglen = hdrlen + mss;
 	left = txm->data_len;
@@ -311,6 +373,7 @@ ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm,
 		ionic_tx_tso_post(q, desc, txm,
 			desc_addr, desc_nsge, desc_len,
 			hdrlen, mss,
+			encap,
 			vlan_tci, has_vlan,
 			start, done && not_xmit_more);
 
 		desc = ionic_tx_tso_next(q, &elem);
@@ -352,6 +415,7 @@ ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm,
 			ionic_tx_tso_post(q, desc, txm_seg,
 				desc_addr, desc_nsge, desc_len,
 				hdrlen, mss,
+				encap,
 				vlan_tci, has_vlan,
 				start, done && not_xmit_more);
 
 			desc = ionic_tx_tso_next(q, &elem);
@@ -368,7 +432,7 @@ ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm,
 
 static int
 ionic_tx(struct ionic_queue *q, struct rte_mbuf *txm,
-		uint64_t offloads __rte_unused, bool not_xmit_more)
+		uint64_t offloads, bool not_xmit_more)
 {
 	struct ionic_txq_desc *desc_base = q->base;
 	struct ionic_txq_sg_desc *sg_desc_base = q->sg_base;
@@ -377,15 +441,34 @@ ionic_tx(struct ionic_queue *q, struct rte_mbuf *txm,
 	struct ionic_txq_sg_elem *elem = sg_desc->elems;
 	struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
 	struct rte_mbuf *txm_seg;
+	bool encap;
 	bool has_vlan;
 	uint64_t ol_flags = txm->ol_flags;
 	uint64_t addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm));
 	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
 	uint8_t flags = 0;
 
+	if ((ol_flags & PKT_TX_IP_CKSUM) &&
+	    (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) {
+		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
+		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
+		if (((ol_flags & PKT_TX_TCP_CKSUM) &&
+		     (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) ||
+		    ((ol_flags & PKT_TX_UDP_CKSUM) &&
+		     (offloads & DEV_TX_OFFLOAD_UDP_CKSUM)))
+			flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
+	} else {
+		stats->no_csum++;
+	}
+
 	has_vlan = (ol_flags & PKT_TX_VLAN_PKT);
+	encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
+		(ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
+		((ol_flags & PKT_TX_OUTER_IPV4) ||
+		(ol_flags & PKT_TX_OUTER_IPV6));
 
 	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
+	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
 
 	desc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
 	desc->len = txm->data_len;
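The TSO hunks above derive hdrlen from the mbuf header-length fields and preload the inner-most TCP checksum before segmentation, so the caller must describe the frame layout before handing the mbuf to the burst function. A minimal sketch of caller-side setup for the non-encap branch, assuming an already-built Ethernet/IPv4/TCP frame with no IP or TCP options; the helper name and MSS value are illustrative only, not part of this patch:

#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_tcp.h>

/* Hypothetical setup: with these fields, ionic_tx_tso() computes
 * hdrlen = l2_len + l3_len + l4_len and takes the non-encap branch,
 * and ionic_tx_tcp_pseudo_csum() takes its IPv4 branch.
 */
static void
prepare_tso_mbuf(struct rte_mbuf *m)
{
	m->l2_len = 14;                          /* Ethernet, no VLAN */
	m->l3_len = sizeof(struct rte_ipv4_hdr); /* 20, no options */
	m->l4_len = sizeof(struct rte_tcp_hdr);  /* 20, no options */
	m->tso_segsz = 1448;                     /* example MSS */
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG;
}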
@@ -469,6 +552,7 @@ ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		PKT_TX_IPV4 |		\
 		PKT_TX_IPV6 |		\
 		PKT_TX_VLAN |		\
+		PKT_TX_IP_CKSUM |	\
 		PKT_TX_TCP_SEG |	\
 		PKT_TX_L4_MASK)
 
@@ -525,7 +609,7 @@ ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.offloads = rxq->offloads;
 }
 
-static void __attribute__((cold))
+static void __rte_cold
 ionic_rx_empty(struct ionic_queue *q)
 {
 	struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q);
@@ -541,7 +625,7 @@ ionic_rx_empty(struct ionic_queue *q)
 	}
 }
 
-void __attribute__((cold))
+void __rte_cold
 ionic_dev_rx_queue_release(void *rx_queue)
 {
 	struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue;
@@ -553,7 +637,7 @@ ionic_dev_rx_queue_release(void *rx_queue)
 	ionic_qcq_free(rxq);
 }
 
-int __attribute__((cold))
+int __rte_cold
 ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 		uint16_t rx_queue_id,
 		uint16_t nb_desc,
@@ -700,6 +784,10 @@ ionic_rx_clean(struct ionic_queue *q,
 		rxm->nb_segs++;
 	}
 
+	/* RSS */
+	pkt_flags |= PKT_RX_RSS_HASH;
+	rxm->hash.rss = cq_desc->rss_hash;
+
 	/* Vlan Strip */
 	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
 		pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
@@ -785,7 +873,7 @@ ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
 	ionic_q_post(q, true, ionic_rx_clean, mbuf);
 }
 
-static int __attribute__((cold))
+static int __rte_cold
 ionic_rx_fill(struct ionic_qcq *rxq, uint32_t len)
 {
 	struct ionic_queue *q = &rxq->q;
@@ -862,7 +950,7 @@ ionic_rx_fill(struct ionic_qcq *rxq, uint32_t len)
 /*
  * Start Receive Units for specified queue.
  */
-int __attribute__((cold))
+int __rte_cold
 ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 {
 	uint32_t frame_size = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -895,7 +983,7 @@ ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 	return 0;
 }
 
-static inline void __attribute__((cold))
+static inline void __rte_cold
 ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do,
 		void *service_cb_arg)
 {
@@ -950,7 +1038,7 @@ ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do,
 /*
  * Stop Receive Units for specified queue.
  */
-int __attribute__((cold))
+int __rte_cold
 ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 {
 	struct ionic_qcq *rxq;
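With the RSS hunk above, ionic_rx_clean() now sets PKT_RX_RSS_HASH on every received mbuf and stores the hardware-computed hash in hash.rss. A minimal sketch of consuming that hash after a receive burst, assuming a configured port and queue 0; the worker-distribution scheme and function name are illustrative only, not part of this patch:

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Hypothetical consumer: use the NIC-provided RSS hash to pick a
 * worker without re-hashing the packet on the CPU.
 */
static void
poll_and_distribute(uint16_t port_id, uint32_t nb_workers)
{
	struct rte_mbuf *pkts[32];
	uint16_t n, i;

	n = rte_eth_rx_burst(port_id, 0, pkts, 32);
	for (i = 0; i < n; i++) {
		uint32_t worker = 0;

		if (pkts[i]->ol_flags & PKT_RX_RSS_HASH)
			worker = pkts[i]->hash.rss % nb_workers;

		/* a real application would enqueue pkts[i] to worker
		 * `worker` here; freeing is a stand-in to keep the
		 * sketch self-contained
		 */
		RTE_SET_USED(worker);
		rte_pktmbuf_free(pkts[i]);
	}
}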