diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index d8fb252ff1..31819c5bd5 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -56,7 +56,6 @@
 #include <rte_lcore.h>
 #include <rte_atomic.h>
 #include <rte_branch_prediction.h>
-#include <rte_ring.h>
 #include <rte_mempool.h>
 #include <rte_malloc.h>
 #include <rte_mbuf.h>
@@ -67,6 +66,7 @@
 #include <rte_udp.h>
 #include <rte_tcp.h>
 #include <rte_sctp.h>
+#include <rte_net.h>
 #include <rte_string_fns.h>
 
 #include "e1000_logs.h"
@@ -78,21 +78,13 @@
 
 #define E1000_RXDCTL_GRAN	0x01000000 /* RXDCTL Granularity */
 
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
-
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check_raw(m, 0);
-	return (m);
-}
+#define E1000_TX_OFFLOAD_MASK ( \
+		PKT_TX_IP_CKSUM |       \
+		PKT_TX_L4_MASK |        \
+		PKT_TX_VLAN_PKT)
 
-#define RTE_MBUF_DATA_DMA_ADDR(mb)             \
-	(uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
-
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
-	(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
+#define E1000_TX_OFFLOAD_NOTSUP_MASK \
+		(PKT_TX_OFFLOAD_MASK ^ E1000_TX_OFFLOAD_MASK)
 
 /**
  * Structure associated with each descriptor of the RX ring of a RX queue.
@@ -312,10 +304,10 @@ what_ctx_update(struct em_tx_queue *txq, uint64_t flags,
 	if (likely (txq->ctx_cache.flags == flags &&
 			((txq->ctx_cache.hdrlen.data ^ hdrlen.data) &
 			txq->ctx_cache.cmp_mask) == 0))
-		return (EM_CTX_0);
+		return EM_CTX_0;
 
 	/* Mismatch */
-	return (EM_CTX_NUM);
+	return EM_CTX_NUM;
 }
 
 /* Reset transmit descriptors after they have been used */
@@ -373,7 +365,7 @@ em_xmit_cleanup(struct em_tx_queue *txq)
 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
 
 	/* No Error */
-	return (0);
+	return 0;
 }
 
 static inline uint32_t
@@ -385,7 +377,7 @@ tx_desc_cksum_flags_to_upper(uint64_t ol_flags)
 
 	tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
 	tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
-	return (tmp);
+	return tmp;
 }
 
 uint16_t
@@ -493,7 +485,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			if (em_xmit_cleanup(txq) != 0) {
 				/* Could not clean any descriptors */
 				if (nb_tx == 0)
-					return (0);
+					return 0;
 				goto end_of_tx;
 			}
 		}
@@ -585,7 +577,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			 * Set up Transmit Data Descriptor.
*/ slen = m_seg->data_len; - buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg); + buf_dma_addr = rte_mbuf_data_dma_addr(m_seg); txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr); txd->lower.data = rte_cpu_to_le_32(cmd_type_len | slen); @@ -627,10 +619,47 @@ end_of_tx: PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u", (unsigned) txq->port_id, (unsigned) txq->queue_id, (unsigned) tx_id, (unsigned) nb_tx); - E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id); + E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id); txq->tx_tail = tx_id; - return (nb_tx); + return nb_tx; +} + +/********************************************************************* + * + * TX prep functions + * + **********************************************************************/ +uint16_t +eth_em_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + int i, ret; + struct rte_mbuf *m; + + for (i = 0; i < nb_pkts; i++) { + m = tx_pkts[i]; + + if (m->ol_flags & E1000_TX_OFFLOAD_NOTSUP_MASK) { + rte_errno = -ENOTSUP; + return i; + } + +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + ret = rte_validate_tx_offload(m); + if (ret != 0) { + rte_errno = ret; + return i; + } +#endif + ret = rte_net_intel_cksum_prepare(m); + if (ret != 0) { + rte_errno = ret; + return i; + } + } + + return i; } /********************************************************************* @@ -645,7 +674,8 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status) uint64_t pkt_flags; /* Check if VLAN present */ - pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0); + pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ? + PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED : 0); return pkt_flags; } @@ -659,7 +689,7 @@ rx_desc_error_to_pkt_flags(uint32_t rx_error) pkt_flags |= PKT_RX_IP_CKSUM_BAD; if (rx_error & E1000_RXD_ERR_TCPE) pkt_flags |= PKT_RX_L4_CKSUM_BAD; - return (pkt_flags); + return pkt_flags; } uint16_t @@ -735,7 +765,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, (unsigned) rx_id, (unsigned) status, (unsigned) rte_le_to_cpu_16(rxd.length)); - nmb = rte_rxmbuf_alloc(rxq->mb_pool); + nmb = rte_mbuf_raw_alloc(rxq->mb_pool); if (nmb == NULL) { PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " "queue_id=%u", @@ -769,7 +799,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, rxm = rxe->mbuf; rxe->mbuf = nmb; dma_addr = - rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb)); + rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb)); rxdp->buffer_addr = dma_addr; rxdp->status = 0; @@ -833,7 +863,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, nb_hold = 0; } rxq->nb_rx_hold = nb_hold; - return (nb_rx); + return nb_rx; } uint16_t @@ -915,7 +945,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, (unsigned) rx_id, (unsigned) status, (unsigned) rte_le_to_cpu_16(rxd.length)); - nmb = rte_rxmbuf_alloc(rxq->mb_pool); + nmb = rte_mbuf_raw_alloc(rxq->mb_pool); if (nmb == NULL) { PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " "queue_id=%u", (unsigned) rxq->port_id, @@ -949,7 +979,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, */ rxm = rxe->mbuf; rxe->mbuf = nmb; - dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb)); + dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb)); rxdp->buffer_addr = dma; rxdp->status = 0; @@ -1078,7 +1108,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, nb_hold = 0; } rxq->nb_rx_hold = nb_hold; - return (nb_rx); + return nb_rx; } #define EM_MAX_BUF_SIZE 16384 @@ -1234,19 +1264,19 @@ 
eth_em_tx_queue_setup(struct rte_eth_dev *dev, tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize, RTE_CACHE_LINE_SIZE, socket_id); if (tz == NULL) - return (-ENOMEM); + return -ENOMEM; /* Allocate the tx queue data structure. */ if ((txq = rte_zmalloc("ethdev TX queue", sizeof(*txq), RTE_CACHE_LINE_SIZE)) == NULL) - return (-ENOMEM); + return -ENOMEM; /* Allocate software ring */ if ((txq->sw_ring = rte_zmalloc("txq->sw_ring", sizeof(txq->sw_ring[0]) * nb_desc, RTE_CACHE_LINE_SIZE)) == NULL) { em_tx_queue_release(txq); - return (-ENOMEM); + return -ENOMEM; } txq->nb_tx_desc = nb_desc; @@ -1268,7 +1298,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev, em_reset_tx_queue(txq); dev->data->tx_queues[queue_idx] = txq; - return (0); + return 0; } static void @@ -1335,7 +1365,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev, if (nb_desc % EM_RXD_ALIGN != 0 || (nb_desc > E1000_MAX_RING_DESC) || (nb_desc < E1000_MIN_RING_DESC)) { - return (-EINVAL); + return -EINVAL; } /* @@ -1344,7 +1374,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev, if (rx_conf->rx_drop_en) { PMD_INIT_LOG(ERR, "drop_en functionality not supported by " "device"); - return (-EINVAL); + return -EINVAL; } /* Free memory prior to re-allocation if needed. */ @@ -1358,19 +1388,19 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev, rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize, RTE_CACHE_LINE_SIZE, socket_id); if (rz == NULL) - return (-ENOMEM); + return -ENOMEM; /* Allocate the RX queue data structure. */ if ((rxq = rte_zmalloc("ethdev RX queue", sizeof(*rxq), RTE_CACHE_LINE_SIZE)) == NULL) - return (-ENOMEM); + return -ENOMEM; /* Allocate software ring. */ if ((rxq->sw_ring = rte_zmalloc("rxq->sw_ring", sizeof (rxq->sw_ring[0]) * nb_desc, RTE_CACHE_LINE_SIZE)) == NULL) { em_rx_queue_release(rxq); - return (-ENOMEM); + return -ENOMEM; } rxq->mb_pool = mp; @@ -1395,7 +1425,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev, dev->data->rx_queues[queue_idx] = rxq; em_reset_rx_queue(rxq); - return (0); + return 0; } uint32_t @@ -1406,11 +1436,6 @@ eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) struct em_rx_queue *rxq; uint32_t desc = 0; - if (rx_queue_id >= dev->data->nb_rx_queues) { - PMD_RX_LOG(DEBUG, "Invalid RX queue_id=%d", rx_queue_id); - return 0; - } - rxq = dev->data->rx_queues[rx_queue_id]; rxdp = &(rxq->rx_ring[rxq->rx_tail]); @@ -1443,6 +1468,57 @@ eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset) return !!(rxdp->status & E1000_RXD_STAT_DD); } +int +eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset) +{ + struct em_rx_queue *rxq = rx_queue; + volatile uint8_t *status; + uint32_t desc; + + if (unlikely(offset >= rxq->nb_rx_desc)) + return -EINVAL; + + if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold) + return RTE_ETH_RX_DESC_UNAVAIL; + + desc = rxq->rx_tail + offset; + if (desc >= rxq->nb_rx_desc) + desc -= rxq->nb_rx_desc; + + status = &rxq->rx_ring[desc].status; + if (*status & E1000_RXD_STAT_DD) + return RTE_ETH_RX_DESC_DONE; + + return RTE_ETH_RX_DESC_AVAIL; +} + +int +eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset) +{ + struct em_tx_queue *txq = tx_queue; + volatile uint8_t *status; + uint32_t desc; + + if (unlikely(offset >= txq->nb_tx_desc)) + return -EINVAL; + + desc = txq->tx_tail + offset; + /* go to next desc that has the RS bit */ + desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) * + txq->tx_rs_thresh; + if (desc >= txq->nb_tx_desc) { + desc -= txq->nb_tx_desc; + if (desc >= txq->nb_tx_desc) + desc -= txq->nb_tx_desc; + 
} + + status = &txq->tx_ring[desc].upper.fields.status; + if (*status & E1000_TXD_STAT_DD) + return RTE_ETH_TX_DESC_DONE; + + return RTE_ETH_TX_DESC_FULL; +} + void em_dev_clear_queues(struct rte_eth_dev *dev) { @@ -1546,12 +1622,12 @@ em_rctl_bsize(__rte_unused enum e1000_mac_type hwtyp, uint32_t *bufsz) i++) { if (rctl_bsize >= bufsz_to_rctl[i].bufsz) { *bufsz = bufsz_to_rctl[i].bufsz; - return (bufsz_to_rctl[i].rctl); + return bufsz_to_rctl[i].rctl; } } /* Should never happen. */ - return (-EINVAL); + return -EINVAL; } static int @@ -1567,15 +1643,16 @@ em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq) /* Initialize software ring entries */ for (i = 0; i < rxq->nb_rx_desc; i++) { volatile struct e1000_rx_desc *rxd; - struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool); + struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool); if (mbuf == NULL) { PMD_INIT_LOG(ERR, "RX mbuf alloc failed " "queue_id=%hu", rxq->queue_id); - return (-ENOMEM); + return -ENOMEM; } - dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf)); + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf)); /* Clear HW ring memory */ rxq->rx_ring[i] = rxd_init;
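
The new eth_em_prep_pkts handler is installed as the device's tx_pkt_prepare callback (in em_ethdev.c, outside this diff), so applications reach it through rte_eth_tx_prepare(). Below is a minimal usage sketch, not part of the patch: the helper name and error-handling policy are illustrative, and port_id is uint8_t in the releases this patch targets (it became uint16_t later).

#include <stdio.h>

#include <rte_ethdev.h>
#include <rte_errno.h>
#include <rte_mbuf.h>

/* Validate offload flags and fix up checksum fields for a burst of
 * packets, then transmit only the packets that passed preparation. */
static uint16_t
send_prepared_burst(uint8_t port_id, uint16_t queue_id,
		    struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	/* On an em port, rte_eth_tx_prepare() ends up in eth_em_prep_pkts().
	 * It returns the count of packets that passed; on failure,
	 * pkts[nb_prep] is the first rejected mbuf and rte_errno says why
	 * (note this patch stores negated values, e.g. -ENOTSUP for an
	 * offload outside E1000_TX_OFFLOAD_MASK). */
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id,
					      pkts, nb_pkts);

	if (nb_prep != nb_pkts)
		printf("tx_prepare rejected packet %u, rte_errno=%d\n",
		       nb_prep, rte_errno);

	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}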