return rc;
}
-static int bnxt_alloc_ctx_mem_buf(char *type, size_t size,
+static int bnxt_alloc_ctx_mem_buf(struct bnxt *bp, char *type, size_t size,
struct bnxt_ctx_mem_buf_info *ctx)
{
if (!ctx)
return -EINVAL;
- ctx->va = rte_zmalloc(type, size, 0);
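+ /* Allocate from the device's NUMA node so the counter table stays local to the port. */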
+ ctx->va = rte_zmalloc_socket(type, size, 0,
+ bp->eth_dev->device->numa_node);
if (ctx->va == NULL)
return -ENOMEM;
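+ /* Lock the page backing the buffer so it stays resident for firmware DMA. */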
rte_mem_lock_page(ctx->va);
sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
/* 4 bytes for each counter-id */
- rc = bnxt_alloc_ctx_mem_buf(type,
+ rc = bnxt_alloc_ctx_mem_buf(bp, type,
max_fc * 4,
&bp->flow_stat->rx_fc_in_tbl);
if (rc)
sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
- rc = bnxt_alloc_ctx_mem_buf(type,
+ rc = bnxt_alloc_ctx_mem_buf(bp, type,
max_fc * 16,
&bp->flow_stat->rx_fc_out_tbl);
if (rc)
sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
/* 4 bytes for each counter-id */
- rc = bnxt_alloc_ctx_mem_buf(type,
+ rc = bnxt_alloc_ctx_mem_buf(bp, type,
max_fc * 4,
&bp->flow_stat->tx_fc_in_tbl);
if (rc)
sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
- rc = bnxt_alloc_ctx_mem_buf(type,
+ rc = bnxt_alloc_ctx_mem_buf(bp, type,
max_fc * 16,
&bp->flow_stat->tx_fc_out_tbl);
if (rc)
dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
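+ /* Advertise Rx VLAN stripping only when the VNIC capability is reported by firmware. */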
+ if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_STRIP;
dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT |
dev_info->tx_queue_offload_capa;
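+ /* Tx VLAN insertion likewise depends on a firmware capability flag. */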
+ if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT;
dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
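+ /* Queues may be set up or released while the port is started. */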
+ dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
+ RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
};
eth_dev->data->dev_conf.intr_conf.lsc = 1;
- eth_dev->data->dev_conf.intr_conf.rxq = 1;
dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
if (new.link_status != eth_dev->data->dev_link.link_status ||
new.link_speed != eth_dev->data->dev_link.link_speed) {
rte_eth_linkstatus_set(eth_dev, &new);
-
- rte_eth_dev_callback_process(eth_dev,
- RTE_ETH_EVENT_INTR_LSC,
- NULL);
-
bnxt_print_link_info(eth_dev);
}
if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) {
PMD_DRV_LOG(ERR,
- "Invalid hashkey length, should be 16 bytes\n");
+ "Invalid hashkey length, should be %d bytes\n",
+ HW_HASH_KEY_SIZE);
return -EINVAL;
}
memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len);
if (!rc)
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size;
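+ /* Inform the firmware of the new host MTU; a failure here is logged, not fatal. */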
+ if (bnxt_hwrm_config_host_mtu(bp))
+ PMD_DRV_LOG(WARNING, "Failed to configure host MTU\n");
+
PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu);
return rc;
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt_cp_ring_info *cpr;
- uint32_t desc = 0, raw_cons;
+ uint32_t desc = 0, raw_cons, cp_ring_size;
struct bnxt_rx_queue *rxq;
struct rx_pkt_cmpl *rxcmp;
int rc;
rxq = dev->data->rx_queues[rx_queue_id];
cpr = rxq->cp_ring;
raw_cons = cpr->cp_raw_cons;
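+ /* Cache the ring size; bnxt_cpr_cmp_valid() uses it to derive the expected valid-bit phase. */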
+ cp_ring_size = cpr->cp_ring_struct->ring_size;
while (1) {
uint32_t agg_cnt, cons, cmpl_type;
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
- if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
+ if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size))
break;
cmpl_type = CMP_TYPE(rxcmp);
struct bnxt_rx_queue *rxq = rx_queue;
struct bnxt_cp_ring_info *cpr;
struct bnxt_rx_ring_info *rxr;
- uint32_t desc, raw_cons;
+ uint32_t desc, raw_cons, cp_ring_size;
struct bnxt *bp = rxq->bp;
struct rx_pkt_cmpl *rxcmp;
int rc;
rxr = rxq->rx_ring;
cpr = rxq->cp_ring;
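+ /* Cache the completion ring size for the valid-bit checks below. */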
+ cp_ring_size = cpr->cp_ring_struct->ring_size;
/*
* For the vector receive case, the completion at the requested
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
- if (CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
+ if (bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size))
return RTE_ETH_RX_DESC_DONE;
/* Check whether rx desc has an mbuf attached. */
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
- if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
+ if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size))
break;
cmpl_type = CMP_TYPE(rxcmp);
bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
{
struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
- struct bnxt_tx_ring_info *txr;
- struct bnxt_cp_ring_info *cpr;
- struct rte_mbuf **tx_buf;
- struct tx_pkt_cmpl *txcmp;
- uint32_t cons, cp_cons;
+ struct bnxt_cp_ring_info *cpr = txq->cp_ring;
+ uint32_t ring_mask, raw_cons, nb_tx_pkts = 0;
+ struct cmpl_base *cp_desc_ring;
int rc;
- if (!txq)
- return -EINVAL;
-
rc = is_bnxt_in_error(txq->bp);
if (rc)
return rc;
- cpr = txq->cp_ring;
- txr = txq->tx_ring;
-
if (offset >= txq->nb_tx_desc)
return -EINVAL;
- cons = RING_CMP(cpr->cp_ring_struct, offset);
- txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
- cp_cons = cpr->cp_raw_cons;
+ /* Return "desc done" if descriptor is available for use. */
+ if (bnxt_tx_bds_in_hw(txq) <= offset)
+ return RTE_ETH_TX_DESC_DONE;
- if (cons > cp_cons) {
- if (CMPL_VALID(txcmp, cpr->valid))
- return RTE_ETH_TX_DESC_UNAVAIL;
- } else {
- if (CMPL_VALID(txcmp, !cpr->valid))
- return RTE_ETH_TX_DESC_UNAVAIL;
+ raw_cons = cpr->cp_raw_cons;
+ cp_desc_ring = cpr->cp_desc_ring;
+ ring_mask = cpr->cp_ring_struct->ring_mask;
+
+ /* Check to see if hw has posted a completion for the descriptor. */
+ while (1) {
+ struct tx_cmpl *txcmp;
+ uint32_t cons;
+
+ cons = RING_CMPL(ring_mask, raw_cons);
+ txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];
+
+ if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1))
+ break;
+
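+ /* The opaque field of a TX L2 completion carries the count of packets completed. */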
+ if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
+ nb_tx_pkts += rte_le_to_cpu_32(txcmp->opaque);
+
+ if (nb_tx_pkts > offset)
+ return RTE_ETH_TX_DESC_DONE;
+
+ raw_cons = NEXT_RAW_CMP(raw_cons);
}
- tx_buf = &txr->tx_buf_ring[cons];
- if (*tx_buf == NULL)
- return RTE_ETH_TX_DESC_DONE;
+ /* Descriptor is pending transmit, not yet completed by hardware. */
return RTE_ETH_TX_DESC_FULL;
}
err:
bp->flags |= BNXT_FLAG_FATAL_ERROR;
bnxt_uninit_resources(bp, false);
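+ /* Raise a removal event so the application can detach the failed port. */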
+ if (bp->eth_dev->data->dev_conf.intr_conf.rmv)
+ rte_eth_dev_callback_process(bp->eth_dev,
+ RTE_ETH_EVENT_INTR_RMV,
+ NULL);
pthread_mutex_unlock(&bp->err_recovery_lock);
PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n");
}
if (!mz) {
mz = rte_memzone_reserve_aligned(mz_name,
rmem->nr_pages * 8,
- SOCKET_ID_ANY,
+ bp->eth_dev->device->numa_node,
RTE_MEMZONE_2MB |
RTE_MEMZONE_SIZE_HINT_ONLY |
RTE_MEMZONE_IOVA_CONTIG,
if (!mz) {
mz = rte_memzone_reserve_aligned(mz_name,
mem_size,
- SOCKET_ID_ANY,
+ bp->eth_dev->device->numa_node,
RTE_MEMZONE_1GB |
RTE_MEMZONE_SIZE_HINT_ONLY |
RTE_MEMZONE_IOVA_CONTIG,
PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
bnxt_eth_hw_addr_random(bp->mac_addr);
PMD_DRV_LOG(INFO,
- "Assign random MAC:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ "Assign random MAC:" RTE_ETHER_ADDR_PRT_FMT "\n",
bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);
static struct rte_pci_driver bnxt_rte_pmd = {
.id_table = bnxt_pci_id_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_INTR_RMV |
RTE_PCI_DRV_PROBE_AGAIN, /* Needed in case of VF-REPs
* and OVS-DPDK
*/