1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
2 * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved.
5 #include <rte_malloc.h>
6 #include <ethdev_driver.h>
9 #include "ionic_logs.h"
10 #include "ionic_lif.h"
11 #include "ionic_ethdev.h"
12 #include "ionic_rx_filter.h"
13 #include "ionic_rxtx.h"
/* Highest version of each hardware queue type that this PMD supports.
 * NOTE(review): this view of the file is elided; some continuation lines of
 * the version comments are missing here — consult the full source.
 */
15 /* queuetype support level */
16 static const uint8_t ionic_qtype_vers[IONIC_QTYPE_MAX] = {
17 [IONIC_QTYPE_ADMINQ] = 0, /* 0 = Base version with CQ support */
18 [IONIC_QTYPE_NOTIFYQ] = 0, /* 0 = Base version */
19 [IONIC_QTYPE_RXQ] = 2, /* 0 = Base version with CQ+SG support
23 [IONIC_QTYPE_TXQ] = 3, /* 0 = Base version with CQ+SG support
24 * 1 = ... with Tx SG version 1
/* Forward declarations for the MAC filter helpers used by the ethdev ops below. */
30 static int ionic_lif_addr_add(struct ionic_lif *lif, const uint8_t *addr);
31 static int ionic_lif_addr_del(struct ionic_lif *lif, const uint8_t *addr);
/* Enable a queue/completion-queue pair by posting an IONIC_CMD_Q_CONTROL
 * admin command with oper = IONIC_Q_ENABLE; blocks until completion.
 * Returns the adminq post/wait status.
 */
34 ionic_qcq_enable(struct ionic_qcq *qcq)
36 struct ionic_queue *q = &qcq->q;
37 struct ionic_lif *lif = q->lif;
38 struct ionic_admin_ctx ctx = {
41 .opcode = IONIC_CMD_Q_CONTROL,
43 .index = rte_cpu_to_le_32(q->index),
44 .oper = IONIC_Q_ENABLE,
48 return ionic_adminq_post_wait(lif, &ctx);
/* Disable a queue/completion-queue pair: same IONIC_CMD_Q_CONTROL admin
 * command as ionic_qcq_enable() but with oper = IONIC_Q_DISABLE.
 */
52 ionic_qcq_disable(struct ionic_qcq *qcq)
54 struct ionic_queue *q = &qcq->q;
55 struct ionic_lif *lif = q->lif;
56 struct ionic_admin_ctx ctx = {
59 .opcode = IONIC_CMD_Q_CONTROL,
61 .index = rte_cpu_to_le_32(q->index),
62 .oper = IONIC_Q_DISABLE,
66 return ionic_adminq_post_wait(lif, &ctx);
/* Stop the LIF: clear the UP state flag, then stop every initialized
 * rx and tx queue via the ethdev queue-stop entry points.
 * Queue-stop return values are deliberately ignored (best-effort teardown).
 */
70 ionic_lif_stop(struct ionic_lif *lif)
76 lif->state &= ~IONIC_LIF_F_UP;
78 for (i = 0; i < lif->nrxqcqs; i++) {
79 struct ionic_qcq *rxq = lif->rxqcqs[i];
80 if (rxq->flags & IONIC_QCQ_F_INITED)
81 (void)ionic_dev_rx_queue_stop(lif->eth_dev, i);
84 for (i = 0; i < lif->ntxqcqs; i++) {
85 struct ionic_qcq *txq = lif->txqcqs[i];
86 if (txq->flags & IONIC_QCQ_F_INITED)
87 (void)ionic_dev_tx_queue_stop(lif->eth_dev, i);
/* Issue a device-level LIF reset command and wait for it to complete.
 * A failure is only logged as a warning (reset is best-effort).
 */
92 ionic_lif_reset(struct ionic_lif *lif)
94 struct ionic_dev *idev = &lif->adapter->idev;
99 ionic_dev_cmd_lif_reset(idev);
100 err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
102 IONIC_PRINT(WARNING, "Failed to reset %s", lif->name);
/* Fill 'stats' with absolute (never-reset) counters:
 * aggregate rx/tx packet, byte, error and drop totals from the firmware
 * LIF stats plus software per-queue drop counters, and per-queue counters
 * for up to RTE_ETHDEV_QUEUE_STAT_CNTRS queues.
 * NOTE(review): elided view — the early-return path when lif->info is not
 * yet initialized is only partially visible here.
 */
106 ionic_lif_get_abs_stats(const struct ionic_lif *lif, struct rte_eth_stats *stats)
108 struct ionic_lif_stats *ls = &lif->info->stats;
110 uint32_t num_rx_q_counters = RTE_MIN(lif->nrxqcqs, (uint32_t)
111 RTE_ETHDEV_QUEUE_STAT_CNTRS);
112 uint32_t num_tx_q_counters = RTE_MIN(lif->ntxqcqs, (uint32_t)
113 RTE_ETHDEV_QUEUE_STAT_CNTRS);
115 memset(stats, 0, sizeof(*stats));
118 IONIC_PRINT(DEBUG, "Stats on port %u not yet initialized",
/* RX: totals across unicast/multicast/broadcast. */
125 stats->ipackets = ls->rx_ucast_packets +
126 ls->rx_mcast_packets +
127 ls->rx_bcast_packets;
129 stats->ibytes = ls->rx_ucast_bytes +
133 for (i = 0; i < lif->nrxqcqs; i++) {
134 struct ionic_rx_stats *rx_stats = &lif->rxqcqs[i]->stats.rx;
136 rx_stats->no_cb_arg +
137 rx_stats->bad_cq_status +
143 ls->rx_ucast_drop_packets +
144 ls->rx_mcast_drop_packets +
145 ls->rx_bcast_drop_packets;
150 ls->rx_queue_disabled +
151 ls->rx_desc_fetch_error +
152 ls->rx_desc_data_error;
/* Per-queue RX counters, clamped to the ethdev stat array size. */
154 for (i = 0; i < num_rx_q_counters; i++) {
155 struct ionic_rx_stats *rx_stats = &lif->rxqcqs[i]->stats.rx;
156 stats->q_ipackets[i] = rx_stats->packets;
157 stats->q_ibytes[i] = rx_stats->bytes;
159 rx_stats->no_cb_arg +
160 rx_stats->bad_cq_status +
/* TX: totals across unicast/multicast/broadcast. */
167 stats->opackets = ls->tx_ucast_packets +
168 ls->tx_mcast_packets +
169 ls->tx_bcast_packets;
171 stats->obytes = ls->tx_ucast_bytes +
175 for (i = 0; i < lif->ntxqcqs; i++) {
176 struct ionic_tx_stats *tx_stats = &lif->txqcqs[i]->stats.tx;
177 stats->oerrors += tx_stats->drop;
181 ls->tx_ucast_drop_packets +
182 ls->tx_mcast_drop_packets +
183 ls->tx_bcast_drop_packets;
187 ls->tx_queue_disabled +
188 ls->tx_desc_fetch_error +
189 ls->tx_desc_data_error;
/* Per-queue TX counters, clamped to the ethdev stat array size. */
191 for (i = 0; i < num_tx_q_counters; i++) {
192 struct ionic_tx_stats *tx_stats = &lif->txqcqs[i]->stats.tx;
193 stats->q_opackets[i] = tx_stats->packets;
194 stats->q_obytes[i] = tx_stats->bytes;
/* Report stats relative to the last reset: fetch the absolute counters,
 * then subtract the baseline snapshot stored in lif->stats_base
 * (captured by ionic_lif_reset_stats()).
 */
199 ionic_lif_get_stats(const struct ionic_lif *lif,
200 struct rte_eth_stats *stats)
202 ionic_lif_get_abs_stats(lif, stats);
204 stats->ipackets -= lif->stats_base.ipackets;
205 stats->opackets -= lif->stats_base.opackets;
206 stats->ibytes -= lif->stats_base.ibytes;
207 stats->obytes -= lif->stats_base.obytes;
208 stats->imissed -= lif->stats_base.imissed;
209 stats->ierrors -= lif->stats_base.ierrors;
210 stats->oerrors -= lif->stats_base.oerrors;
211 stats->rx_nombuf -= lif->stats_base.rx_nombuf;
/* Zero the software per-queue rx/tx stats and re-snapshot the absolute
 * counters as the new baseline for ionic_lif_get_stats().
 * NOTE(review): the loop bound is nrxqcqs but it also clears txqcqs[i] —
 * presumably nrxqcqs == ntxqcqs here; confirm against the full source.
 */
215 ionic_lif_reset_stats(struct ionic_lif *lif)
219 for (i = 0; i < lif->nrxqcqs; i++) {
220 memset(&lif->rxqcqs[i]->stats.rx, 0,
221 sizeof(struct ionic_rx_stats));
222 memset(&lif->txqcqs[i]->stats.tx, 0,
223 sizeof(struct ionic_tx_stats));
226 ionic_lif_get_abs_stats(lif, &lif->stats_base);
/* Copy the firmware LIF stats into 'stats', subtracting the u64-wise
 * baseline captured by ionic_lif_reset_hw_stats(). Treats both stat
 * structures as flat arrays of uint64_t.
 */
230 ionic_lif_get_hw_stats(struct ionic_lif *lif, struct ionic_lif_stats *stats)
232 uint16_t i, count = sizeof(struct ionic_lif_stats) / sizeof(uint64_t);
233 uint64_t *stats64 = (uint64_t *)stats;
234 uint64_t *lif_stats64 = (uint64_t *)&lif->info->stats;
235 uint64_t *lif_stats64_base = (uint64_t *)&lif->lif_stats_base;
237 for (i = 0; i < count; i++)
238 stats64[i] = lif_stats64[i] - lif_stats64_base[i];
/* Snapshot the current firmware LIF stats (u64-wise) as the baseline
 * used by ionic_lif_get_hw_stats().
 */
242 ionic_lif_reset_hw_stats(struct ionic_lif *lif)
244 uint16_t i, count = sizeof(struct ionic_lif_stats) / sizeof(uint64_t);
245 uint64_t *lif_stats64 = (uint64_t *)&lif->info->stats;
246 uint64_t *lif_stats64_base = (uint64_t *)&lif->lif_stats_base;
248 for (i = 0; i < count; i++)
249 lif_stats64_base[i] = lif_stats64[i];
/* Install a unicast MAC rx filter on the NIC via IONIC_CMD_RX_FILTER_ADD,
 * then record it in the driver's filter list (ionic_rx_filter_save).
 * Returns 0 on success or a negative adminq/save error.
 */
253 ionic_lif_addr_add(struct ionic_lif *lif, const uint8_t *addr)
255 struct ionic_admin_ctx ctx = {
256 .pending_work = true,
257 .cmd.rx_filter_add = {
258 .opcode = IONIC_CMD_RX_FILTER_ADD,
259 .match = rte_cpu_to_le_16(IONIC_RX_FILTER_MATCH_MAC),
264 memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, RTE_ETHER_ADDR_LEN);
266 err = ionic_adminq_post_wait(lif, &ctx);
270 IONIC_PRINT(INFO, "rx_filter add (id %d)",
271 rte_le_to_cpu_32(ctx.comp.rx_filter_add.filter_id));
273 return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, &ctx);
/* Remove the rx filter matching 'addr': look it up under the filter lock,
 * free the local entry, then tell the NIC via IONIC_CMD_RX_FILTER_DEL.
 * NOTE(review): elided view — the not-found return after the early unlock
 * is not visible here.
 */
277 ionic_lif_addr_del(struct ionic_lif *lif, const uint8_t *addr)
279 struct ionic_admin_ctx ctx = {
280 .pending_work = true,
281 .cmd.rx_filter_del = {
282 .opcode = IONIC_CMD_RX_FILTER_DEL,
285 struct ionic_rx_filter *f;
290 rte_spinlock_lock(&lif->rx_filters.lock);
292 f = ionic_rx_filter_by_addr(lif, addr);
294 rte_spinlock_unlock(&lif->rx_filters.lock);
/* Capture the hardware filter id before freeing the local entry. */
298 ctx.cmd.rx_filter_del.filter_id = rte_cpu_to_le_32(f->filter_id);
299 ionic_rx_filter_free(f);
301 rte_spinlock_unlock(&lif->rx_filters.lock);
303 err = ionic_adminq_post_wait(lif, &ctx);
307 IONIC_PRINT(INFO, "rx_filter del (id %d)",
308 rte_le_to_cpu_32(ctx.cmd.rx_filter_del.filter_id));
/* ethdev mac_addr_add callback: 'index' and 'pool' are unused; delegates
 * to ionic_lif_addr_add() with the raw MAC bytes.
 */
314 ionic_dev_add_mac(struct rte_eth_dev *eth_dev,
315 struct rte_ether_addr *mac_addr,
316 uint32_t index __rte_unused, uint32_t pool __rte_unused)
318 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
322 return ionic_lif_addr_add(lif, (const uint8_t *)mac_addr);
/* ethdev mac_addr_remove callback: validate 'index' against the adapter
 * limit, skip unassigned entries, then delete the filter for that MAC.
 */
326 ionic_dev_remove_mac(struct rte_eth_dev *eth_dev, uint32_t index)
328 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
329 struct ionic_adapter *adapter = lif->adapter;
330 struct rte_ether_addr *mac_addr;
334 if (index >= adapter->max_mac_addrs) {
336 "Index %u is above MAC filter limit %u",
337 index, adapter->max_mac_addrs);
341 mac_addr = &eth_dev->data->mac_addrs[index];
343 if (!rte_is_valid_assigned_ether_addr(mac_addr))
346 ionic_lif_addr_del(lif, (const uint8_t *)mac_addr);
/* ethdev mac_addr_set callback: delete the old primary MAC filter (if one
 * was set), store the new address in lif->mac_addr, and install a filter
 * for it. A NULL new address is rejected with a log message.
 */
350 ionic_dev_set_mac(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr)
352 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
356 if (mac_addr == NULL) {
357 IONIC_PRINT(NOTICE, "New mac is null");
361 if (!rte_is_zero_ether_addr((struct rte_ether_addr *)lif->mac_addr)) {
362 IONIC_PRINT(INFO, "Deleting mac addr %pM",
364 ionic_lif_addr_del(lif, lif->mac_addr);
365 memset(lif->mac_addr, 0, RTE_ETHER_ADDR_LEN);
368 IONIC_PRINT(INFO, "Updating mac addr");
370 rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)lif->mac_addr);
372 return ionic_lif_addr_add(lif, (const uint8_t *)mac_addr);
/* Install a VLAN rx filter for 'vid' via IONIC_CMD_RX_FILTER_ADD and
 * record it locally with ionic_rx_filter_save().
 */
376 ionic_vlan_rx_add_vid(struct ionic_lif *lif, uint16_t vid)
378 struct ionic_admin_ctx ctx = {
379 .pending_work = true,
380 .cmd.rx_filter_add = {
381 .opcode = IONIC_CMD_RX_FILTER_ADD,
382 .match = rte_cpu_to_le_16(IONIC_RX_FILTER_MATCH_VLAN),
383 .vlan.vlan = rte_cpu_to_le_16(vid),
388 err = ionic_adminq_post_wait(lif, &ctx);
392 IONIC_PRINT(INFO, "rx_filter add VLAN %d (id %d)", vid,
393 rte_le_to_cpu_32(ctx.comp.rx_filter_add.filter_id));
395 return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, &ctx);
/* Remove the VLAN rx filter for 'vid': mirror of ionic_lif_addr_del() but
 * keyed by VLAN id (ionic_rx_filter_by_vlan) instead of MAC address.
 */
399 ionic_vlan_rx_kill_vid(struct ionic_lif *lif, uint16_t vid)
401 struct ionic_admin_ctx ctx = {
402 .pending_work = true,
403 .cmd.rx_filter_del = {
404 .opcode = IONIC_CMD_RX_FILTER_DEL,
407 struct ionic_rx_filter *f;
412 rte_spinlock_lock(&lif->rx_filters.lock);
414 f = ionic_rx_filter_by_vlan(lif, vid);
416 rte_spinlock_unlock(&lif->rx_filters.lock);
420 ctx.cmd.rx_filter_del.filter_id = rte_cpu_to_le_32(f->filter_id);
421 ionic_rx_filter_free(f);
422 rte_spinlock_unlock(&lif->rx_filters.lock);
424 err = ionic_adminq_post_wait(lif, &ctx);
428 IONIC_PRINT(INFO, "rx_filter del VLAN %d (id %d)", vid,
429 rte_le_to_cpu_32(ctx.cmd.rx_filter_del.filter_id));
/* ethdev vlan_filter_set callback: add or remove the VLAN filter.
 * NOTE(review): the on/off selector parameter and branch condition are
 * elided from this view — confirm against the full source.
 */
435 ionic_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id,
438 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
442 err = ionic_vlan_rx_add_vid(lif, vlan_id);
444 err = ionic_vlan_rx_kill_vid(lif, vlan_id);
/* Program the NIC receive mode (unicast/multicast/broadcast/promisc/
 * allmulti flag mask) via IONIC_CMD_RX_MODE_SET, logging each flag that
 * is set. Errors are logged but not propagated from the visible lines.
 */
450 ionic_lif_rx_mode(struct ionic_lif *lif, uint32_t rx_mode)
452 struct ionic_admin_ctx ctx = {
453 .pending_work = true,
455 .opcode = IONIC_CMD_RX_MODE_SET,
456 .rx_mode = rte_cpu_to_le_16(rx_mode),
461 if (rx_mode & IONIC_RX_MODE_F_UNICAST)
462 IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_UNICAST");
463 if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
464 IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_MULTICAST");
465 if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
466 IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_BROADCAST");
467 if (rx_mode & IONIC_RX_MODE_F_PROMISC)
468 IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_PROMISC");
469 if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
470 IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_ALLMULTI");
472 err = ionic_adminq_post_wait(lif, &ctx);
474 IONIC_PRINT(ERR, "Failure setting RX mode");
/* Update the cached rx mode and push it to hardware only when it actually
 * changed, avoiding redundant admin commands.
 */
478 ionic_set_rx_mode(struct ionic_lif *lif, uint32_t rx_mode)
480 if (lif->rx_mode != rx_mode) {
481 lif->rx_mode = rx_mode;
482 ionic_lif_rx_mode(lif, rx_mode);
/* ethdev promiscuous_enable callback: set IONIC_RX_MODE_F_PROMISC in the
 * current rx mode and apply it.
 */
487 ionic_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
489 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
490 uint32_t rx_mode = lif->rx_mode;
494 rx_mode |= IONIC_RX_MODE_F_PROMISC;
496 ionic_set_rx_mode(lif, rx_mode);
/* ethdev promiscuous_disable callback: clear IONIC_RX_MODE_F_PROMISC and
 * apply the updated mode.
 */
502 ionic_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
504 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
505 uint32_t rx_mode = lif->rx_mode;
507 rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
509 ionic_set_rx_mode(lif, rx_mode);
/* ethdev allmulticast_enable callback: set IONIC_RX_MODE_F_ALLMULTI and
 * apply the updated mode.
 */
515 ionic_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
517 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
518 uint32_t rx_mode = lif->rx_mode;
520 rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
522 ionic_set_rx_mode(lif, rx_mode);
/* ethdev allmulticast_disable callback: clear IONIC_RX_MODE_F_ALLMULTI and
 * apply the updated mode.
 */
528 ionic_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
530 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
531 uint32_t rx_mode = lif->rx_mode;
533 rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
535 ionic_set_rx_mode(lif, rx_mode);
/* Set the LIF MTU on the NIC via a IONIC_CMD_LIF_SETATTR admin command
 * with attr = IONIC_LIF_ATTR_MTU.
 */
541 ionic_lif_change_mtu(struct ionic_lif *lif, int new_mtu)
543 struct ionic_admin_ctx ctx = {
544 .pending_work = true,
546 .opcode = IONIC_CMD_LIF_SETATTR,
547 .attr = IONIC_LIF_ATTR_MTU,
548 .mtu = rte_cpu_to_le_32(new_mtu),
553 err = ionic_adminq_post_wait(lif, &ctx);
/* Allocate the first free interrupt slot from the adapter's intrs[] map,
 * mark it used, and initialize 'intr' for that index.
 * NOTE(review): the out-of-interrupts return path after the full scan is
 * elided from this view.
 */
561 ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
563 struct ionic_adapter *adapter = lif->adapter;
564 struct ionic_dev *idev = &adapter->idev;
568 * Note: interrupt handler is called for index = 0 only
569 * (we use interrupts for the notifyq only anyway,
570 * which has index = 0)
573 for (index = 0; index < adapter->nintrs; index++)
574 if (!adapter->intrs[index])
577 if (index == adapter->nintrs)
580 adapter->intrs[index] = true;
582 ionic_intr_init(idev, intr, index);
/* Return an interrupt slot to the adapter's free pool; a slot of
 * IONIC_INTR_NONE means nothing was allocated and is ignored.
 */
588 ionic_intr_free(struct ionic_lif *lif, struct ionic_intr_info *intr)
590 if (intr->index != IONIC_INTR_NONE)
591 lif->adapter->intrs[intr->index] = false;
/* Allocate and wire up one queue/completion-queue pair:
 *  - size the descriptor ring, completion ring and (optional) SG ring,
 *    page-aligning each region and padding for base-address alignment;
 *  - allocate the qcq struct and per-descriptor info array;
 *  - initialize the queue and cq objects;
 *  - reserve one contiguous DMA memzone and carve it into q/cq/sg bases;
 *  - map the pieces and bind the cq to the queue.
 * On error, unwinds via the goto labels at the bottom.
 * NOTE(review): elided view — parameter list (index, num_descs, desc_size)
 * and several error-path lines are missing here.
 */
595 ionic_qcq_alloc(struct ionic_lif *lif, uint8_t type,
597 const char *base, uint32_t flags,
600 uint32_t cq_desc_size,
601 uint32_t sg_desc_size,
602 struct ionic_qcq **qcq)
604 struct ionic_dev *idev = &lif->adapter->idev;
605 struct ionic_qcq *new;
606 uint32_t q_size, cq_size, sg_size, total_size;
607 void *q_base, *cq_base, *sg_base;
608 rte_iova_t q_base_pa = 0;
609 rte_iova_t cq_base_pa = 0;
610 rte_iova_t sg_base_pa = 0;
611 uint32_t socket_id = rte_socket_id();
616 q_size = num_descs * desc_size;
617 cq_size = num_descs * cq_desc_size;
618 sg_size = num_descs * sg_desc_size;
620 total_size = RTE_ALIGN(q_size, PAGE_SIZE) +
621 RTE_ALIGN(cq_size, PAGE_SIZE);
623 * Note: aligning q_size/cq_size is not enough due to cq_base address
624 * aligning as q_base could be not aligned to the page.
627 total_size += PAGE_SIZE;
629 if (flags & IONIC_QCQ_F_SG) {
630 total_size += RTE_ALIGN(sg_size, PAGE_SIZE);
631 total_size += PAGE_SIZE;
634 new = rte_zmalloc("ionic", sizeof(*new), 0);
636 IONIC_PRINT(ERR, "Cannot allocate queue structure");
/* Per-descriptor bookkeeping array (one entry per descriptor). */
643 new->q.info = rte_zmalloc("ionic", sizeof(*new->q.info) * num_descs, 0);
645 IONIC_PRINT(ERR, "Cannot allocate queue info");
647 goto err_out_free_qcq;
652 err = ionic_q_init(lif, idev, &new->q, index, num_descs,
653 desc_size, sg_desc_size);
655 IONIC_PRINT(ERR, "Queue initialization failed");
656 goto err_out_free_info;
659 err = ionic_cq_init(lif, &new->cq, num_descs, cq_desc_size);
661 IONIC_PRINT(ERR, "Completion queue initialization failed");
662 goto err_out_free_info;
665 new->base_z = rte_eth_dma_zone_reserve(lif->eth_dev,
666 base /* name */, index /* queue_idx */,
667 total_size, IONIC_ALIGN, socket_id);
670 IONIC_PRINT(ERR, "Cannot reserve queue DMA memory");
672 goto err_out_free_info;
675 new->base = new->base_z->addr;
676 new->base_pa = new->base_z->iova;
677 new->total_size = total_size;
680 q_base_pa = new->base_pa;
/* cq and sg regions start at the next page boundary after the prior region. */
682 cq_base = (void *)RTE_ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE);
683 cq_base_pa = RTE_ALIGN(q_base_pa + q_size, PAGE_SIZE);
685 if (flags & IONIC_QCQ_F_SG) {
686 sg_base = (void *)RTE_ALIGN((uintptr_t)cq_base + cq_size,
688 sg_base_pa = RTE_ALIGN(cq_base_pa + cq_size, PAGE_SIZE);
689 ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
692 IONIC_PRINT(DEBUG, "Q-Base-PA = %#jx CQ-Base-PA = %#jx "
694 q_base_pa, cq_base_pa, sg_base_pa);
696 ionic_q_map(&new->q, q_base, q_base_pa);
697 ionic_cq_map(&new->cq, cq_base, cq_base_pa);
698 ionic_cq_bind(&new->cq, &new->q);
/* Error unwind labels follow (partially elided in this view). */
705 rte_free(new->q.info);
/* Release a qcq's resources: the DMA memzone backing the rings and the
 * per-descriptor info array (remaining frees are elided from this view).
 */
713 ionic_qcq_free(struct ionic_qcq *qcq)
718 rte_memzone_free(qcq->base_z);
723 rte_free(qcq->q.info);
/* Allocate the rx qcq for queue 'index' with scatter-gather enabled,
 * store it in lif->rxqcqs[index] and return it via *qcq.
 */
731 ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t index, uint16_t nrxq_descs,
732 struct ionic_qcq **qcq)
737 flags = IONIC_QCQ_F_SG;
738 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, index, "rx", flags,
740 sizeof(struct ionic_rxq_desc),
741 sizeof(struct ionic_rxq_comp),
742 sizeof(struct ionic_rxq_sg_desc),
743 &lif->rxqcqs[index]);
747 *qcq = lif->rxqcqs[index];
/* Allocate the tx qcq for queue 'index' with scatter-gather enabled,
 * store it in lif->txqcqs[index] and return it via *qcq.
 */
753 ionic_tx_qcq_alloc(struct ionic_lif *lif, uint32_t index, uint16_t ntxq_descs,
754 struct ionic_qcq **qcq)
759 flags = IONIC_QCQ_F_SG;
760 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, index, "tx", flags,
762 sizeof(struct ionic_txq_desc),
763 sizeof(struct ionic_txq_comp),
764 sizeof(struct ionic_txq_sg_desc),
765 &lif->txqcqs[index]);
769 *qcq = lif->txqcqs[index];
/* Allocate the admin queue qcq (queue index 0, no scatter-gather desc). */
775 ionic_admin_qcq_alloc(struct ionic_lif *lif)
781 err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
783 sizeof(struct ionic_admin_cmd),
784 sizeof(struct ionic_admin_comp),
/* Allocate the notify queue qcq, then allocate its interrupt and mask it
 * until the queue is initialized. Frees the qcq if the interrupt
 * allocation fails; stores the result in lif->notifyqcq.
 */
794 ionic_notify_qcq_alloc(struct ionic_lif *lif)
796 struct ionic_qcq *nqcq;
797 struct ionic_dev *idev = &lif->adapter->idev;
801 err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notify",
803 IONIC_NOTIFYQ_LENGTH,
804 sizeof(struct ionic_notifyq_cmd),
805 sizeof(union ionic_notifyq_comp),
811 err = ionic_intr_alloc(lif, &nqcq->intr);
813 ionic_qcq_free(nqcq);
/* Keep the interrupt masked until ionic_lif_notifyq_init() unmasks it. */
817 ionic_intr_mask_assert(idev->intr_ctrl, nqcq->intr.index,
818 IONIC_INTR_MASK_SET);
820 lif->notifyqcq = nqcq;
/* Return the virtual address of doorbell page 'page_num' inside the PCI
 * doorbell BAR; fails (path elided) if that BAR was not mapped.
 */
826 ionic_bus_map_dbpage(struct ionic_adapter *adapter, int page_num)
828 char *vaddr = adapter->bars[IONIC_PCI_BAR_DBELL].vaddr;
830 if (adapter->num_bars <= IONIC_PCI_BAR_DBELL)
833 return (void *)&vaddr[page_num << PAGE_SHIFT];
/* Query the firmware for the capabilities of each queue type this driver
 * knows about (adminq/notifyq/rxq/txq), at the highest version listed in
 * ionic_qtype_vers[]. The identity words are read back from the dev_cmd
 * data registers into adapter->ident.txq (reused as scratch for every
 * qtype) and cached per-qtype in lif->qtype_info[].
 * -EINVAL means the qtype is unsupported; -EIO means older firmware
 * without q_ident support.
 */
837 ionic_lif_queue_identify(struct ionic_lif *lif)
839 struct ionic_adapter *adapter = lif->adapter;
840 struct ionic_dev *idev = &adapter->idev;
841 union ionic_q_identity *q_ident = &adapter->ident.txq;
842 uint32_t q_words = RTE_DIM(q_ident->words);
843 uint32_t cmd_words = RTE_DIM(idev->dev_cmd->data);
844 uint32_t i, nwords, qtype;
847 for (qtype = 0; qtype < RTE_DIM(ionic_qtype_vers); qtype++) {
848 struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
850 /* Filter out the types this driver knows about */
852 case IONIC_QTYPE_ADMINQ:
853 case IONIC_QTYPE_NOTIFYQ:
854 case IONIC_QTYPE_RXQ:
855 case IONIC_QTYPE_TXQ:
861 memset(qti, 0, sizeof(*qti));
863 ionic_dev_cmd_queue_identify(idev, IONIC_LIF_TYPE_CLASSIC,
864 qtype, ionic_qtype_vers[qtype]);
865 err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
866 if (err == -EINVAL) {
867 IONIC_PRINT(ERR, "qtype %d not supported\n", qtype);
869 } else if (err == -EIO) {
870 IONIC_PRINT(ERR, "q_ident failed, older FW\n");
873 IONIC_PRINT(ERR, "q_ident failed, qtype %d: %d\n",
/* Copy only as many identity words as both buffers can hold. */
878 nwords = RTE_MIN(q_words, cmd_words);
879 for (i = 0; i < nwords; i++)
880 q_ident->words[i] = ioread32(&idev->dev_cmd->data[i]);
882 qti->version = q_ident->version;
883 qti->supported = q_ident->supported;
884 qti->features = rte_le_to_cpu_64(q_ident->features);
885 qti->desc_sz = rte_le_to_cpu_16(q_ident->desc_sz);
886 qti->comp_sz = rte_le_to_cpu_16(q_ident->comp_sz);
887 qti->sg_desc_sz = rte_le_to_cpu_16(q_ident->sg_desc_sz);
888 qti->max_sg_elems = rte_le_to_cpu_16(q_ident->max_sg_elems);
889 qti->sg_desc_stride =
890 rte_le_to_cpu_16(q_ident->sg_desc_stride);
892 IONIC_PRINT(DEBUG, " qtype[%d].version = %d",
893 qtype, qti->version);
894 IONIC_PRINT(DEBUG, " qtype[%d].supported = %#x",
895 qtype, qti->supported);
896 IONIC_PRINT(DEBUG, " qtype[%d].features = %#jx",
897 qtype, qti->features);
898 IONIC_PRINT(DEBUG, " qtype[%d].desc_sz = %d",
899 qtype, qti->desc_sz);
900 IONIC_PRINT(DEBUG, " qtype[%d].comp_sz = %d",
901 qtype, qti->comp_sz);
902 IONIC_PRINT(DEBUG, " qtype[%d].sg_desc_sz = %d",
903 qtype, qti->sg_desc_sz);
904 IONIC_PRINT(DEBUG, " qtype[%d].max_sg_elems = %d",
905 qtype, qti->max_sg_elems);
906 IONIC_PRINT(DEBUG, " qtype[%d].sg_desc_stride = %d",
907 qtype, qti->sg_desc_stride);
/* One-time LIF allocation: copy the device name, identify queue types,
 * init the adminq spinlocks, map doorbell page 0, allocate the tx/rx
 * qcq pointer arrays, the notify and admin queues, and finally the
 * DMA-able lif info region. Error paths are partially elided here.
 */
912 ionic_lif_alloc(struct ionic_lif *lif)
914 struct ionic_adapter *adapter = lif->adapter;
915 uint32_t socket_id = rte_socket_id();
919 * lif->name was zeroed on allocation.
920 * Copy (sizeof() - 1) bytes to ensure that it is NULL terminated.
922 memcpy(lif->name, lif->eth_dev->data->name, sizeof(lif->name) - 1);
924 IONIC_PRINT(DEBUG, "LIF: %s", lif->name);
926 ionic_lif_queue_identify(lif);
928 IONIC_PRINT(DEBUG, "Allocating Lif Info");
930 rte_spinlock_init(&lif->adminq_lock);
931 rte_spinlock_init(&lif->adminq_service_lock);
933 lif->kern_dbpage = ionic_bus_map_dbpage(adapter, 0);
934 if (!lif->kern_dbpage) {
935 IONIC_PRINT(ERR, "Cannot map dbpage, aborting");
939 lif->txqcqs = rte_zmalloc("ionic", sizeof(*lif->txqcqs) *
940 adapter->max_ntxqs_per_lif, 0);
943 IONIC_PRINT(ERR, "Cannot allocate tx queues array");
947 lif->rxqcqs = rte_zmalloc("ionic", sizeof(*lif->rxqcqs) *
948 adapter->max_nrxqs_per_lif, 0);
951 IONIC_PRINT(ERR, "Cannot allocate rx queues array");
955 IONIC_PRINT(DEBUG, "Allocating Notify Queue");
957 err = ionic_notify_qcq_alloc(lif);
959 IONIC_PRINT(ERR, "Cannot allocate notify queue");
963 IONIC_PRINT(DEBUG, "Allocating Admin Queue");
965 err = ionic_admin_qcq_alloc(lif);
967 IONIC_PRINT(ERR, "Cannot allocate admin queue");
971 IONIC_PRINT(DEBUG, "Allocating Lif Info");
973 lif->info_sz = RTE_ALIGN(sizeof(*lif->info), PAGE_SIZE);
975 lif->info_z = rte_eth_dma_zone_reserve(lif->eth_dev,
976 "lif_info", 0 /* queue_idx*/,
977 lif->info_sz, IONIC_ALIGN, socket_id);
979 IONIC_PRINT(ERR, "Cannot allocate lif info memory");
983 lif->info = lif->info_z->addr;
984 lif->info_pa = lif->info_z->iova;
/* Free everything ionic_lif_alloc() created: notify and admin qcqs,
 * the tx/rx qcq pointer arrays, and the lif info memzone. Pointers are
 * NULLed after freeing (some guard checks are elided from this view).
 */
990 ionic_lif_free(struct ionic_lif *lif)
992 if (lif->notifyqcq) {
993 ionic_qcq_free(lif->notifyqcq);
994 lif->notifyqcq = NULL;
998 ionic_qcq_free(lif->adminqcq);
999 lif->adminqcq = NULL;
1003 rte_free(lif->txqcqs);
1008 rte_free(lif->rxqcqs);
1013 rte_memzone_free(lif->info_z);
/* Release all ethdev-visible tx and rx queues and clear their slots in
 * the ethdev data arrays.
 */
1019 ionic_lif_free_queues(struct ionic_lif *lif)
1023 for (i = 0; i < lif->ntxqcqs; i++) {
1024 ionic_dev_tx_queue_release(lif->eth_dev->data->tx_queues[i]);
1025 lif->eth_dev->data->tx_queues[i] = NULL;
1027 for (i = 0; i < lif->nrxqcqs; i++) {
1028 ionic_dev_rx_queue_release(lif->eth_dev->data->rx_queues[i]);
1029 lif->eth_dev->data->rx_queues[i] = NULL;
/* Program RSS on the NIC via IONIC_CMD_LIF_SETATTR / IONIC_LIF_ATTR_RSS:
 * update the cached hash types, optionally the hash key ('key' may be
 * NULL to keep the current one) and the indirection table ('indir' may
 * be NULL to keep the current entries), then post the admin command.
 */
1034 ionic_lif_rss_config(struct ionic_lif *lif,
1035 const uint16_t types, const uint8_t *key, const uint32_t *indir)
1037 struct ionic_adapter *adapter = lif->adapter;
1038 struct ionic_admin_ctx ctx = {
1039 .pending_work = true,
1040 .cmd.lif_setattr = {
1041 .opcode = IONIC_CMD_LIF_SETATTR,
1042 .attr = IONIC_LIF_ATTR_RSS,
1043 .rss.types = rte_cpu_to_le_16(types),
1044 .rss.addr = rte_cpu_to_le_64(lif->rss_ind_tbl_pa),
1049 rte_le_to_cpu_16(adapter->ident.lif.eth.rss_ind_tbl_sz);
1053 lif->rss_types = types;
1056 memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
1059 for (i = 0; i < tbl_sz; i++)
1060 lif->rss_ind_tbl[i] = indir[i];
1062 memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
1063 IONIC_RSS_HASH_KEY_SIZE);
1065 return ionic_adminq_post_wait(lif, &ctx);
/* Set up default RSS: lazily reserve the DMA memzone for the indirection
 * table, (re)fill it round-robin over the current rx queue count when
 * that count changed, and program the NIC with a fixed symmetric
 * Toeplitz key (0x6D5A repeated) and all supported hash types.
 */
1069 ionic_lif_rss_setup(struct ionic_lif *lif)
1071 struct ionic_adapter *adapter = lif->adapter;
1072 static const uint8_t toeplitz_symmetric_key[] = {
1073 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
1074 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
1075 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
1076 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
1077 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A,
1081 rte_le_to_cpu_16(adapter->ident.lif.eth.rss_ind_tbl_sz);
1085 if (!lif->rss_ind_tbl_z) {
1086 lif->rss_ind_tbl_z = rte_eth_dma_zone_reserve(lif->eth_dev,
1087 "rss_ind_tbl", 0 /* queue_idx */,
1088 sizeof(*lif->rss_ind_tbl) * tbl_sz,
1089 IONIC_ALIGN, rte_socket_id());
1090 if (!lif->rss_ind_tbl_z) {
1091 IONIC_PRINT(ERR, "OOM");
1095 lif->rss_ind_tbl = lif->rss_ind_tbl_z->addr;
1096 lif->rss_ind_tbl_pa = lif->rss_ind_tbl_z->iova;
1099 if (lif->rss_ind_tbl_nrxqcqs != lif->nrxqcqs) {
1100 lif->rss_ind_tbl_nrxqcqs = lif->nrxqcqs;
1102 /* Fill indirection table with 'default' values */
1103 for (i = 0; i < tbl_sz; i++)
1104 lif->rss_ind_tbl[i] = i % lif->nrxqcqs;
1107 return ionic_lif_rss_config(lif, IONIC_RSS_OFFLOAD_ALL,
1108 toeplitz_symmetric_key, NULL);
/* Tear down RSS: if an indirection table exists, disable RSS on the NIC
 * (types = 0), then release the table's memzone and clear the cached
 * pointer/address.
 */
1112 ionic_lif_rss_teardown(struct ionic_lif *lif)
1114 if (!lif->rss_ind_tbl)
1117 if (lif->rss_ind_tbl_z) {
1118 /* Disable RSS on the NIC */
1119 ionic_lif_rss_config(lif, 0x0, NULL, NULL);
1121 lif->rss_ind_tbl = NULL;
1122 lif->rss_ind_tbl_pa = 0;
1123 rte_memzone_free(lif->rss_ind_tbl_z);
1124 lif->rss_ind_tbl_z = NULL;
/* Mark a qcq as no longer initialized. */
1129 ionic_lif_qcq_deinit(struct ionic_qcq *qcq)
1131 qcq->flags &= ~IONIC_QCQ_F_INITED;
/* De-initialize a tx qcq (currently just clears the INITED flag). */
1135 ionic_lif_txq_deinit(struct ionic_qcq *qcq)
1137 ionic_lif_qcq_deinit(qcq);
/* De-initialize an rx qcq (currently just clears the INITED flag). */
1141 ionic_lif_rxq_deinit(struct ionic_qcq *qcq)
1143 ionic_lif_qcq_deinit(qcq);
/* De-initialize the notify queue: no-op if it was never initialized,
 * otherwise mask its interrupt and clear the INITED flag.
 */
1147 ionic_lif_notifyq_deinit(struct ionic_lif *lif)
1149 struct ionic_qcq *nqcq = lif->notifyqcq;
1150 struct ionic_dev *idev = &lif->adapter->idev;
1152 if (!(nqcq->flags & IONIC_QCQ_F_INITED))
1155 ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index,
1156 IONIC_INTR_MASK_SET);
1158 nqcq->flags &= ~IONIC_QCQ_F_INITED;
/* CQ-service callback for the admin queue: stop when the completion's
 * color bit no longer matches the cq done_color (no new completions),
 * otherwise consume descriptors up to comp_index on the bound queue.
 */
1162 ionic_adminq_service(struct ionic_cq *cq, uint32_t cq_desc_index,
1163 void *cb_arg __rte_unused)
1165 struct ionic_admin_comp *cq_desc_base = cq->base;
1166 struct ionic_admin_comp *cq_desc = &cq_desc_base[cq_desc_index];
1168 if (!color_match(cq_desc->color, cq->done_color))
1171 ionic_q_service(cq->bound_q, cq_desc_index, cq_desc->comp_index, NULL);
/* Poll a qcq's completion queue up to 'budget' entries with callback
 * 'cb'; returns the amount of work done (analogue of Linux NAPI).
 */
1176 /* This acts like ionic_napi */
1178 ionic_qcq_service(struct ionic_qcq *qcq, int budget, ionic_cq_cb cb,
1181 struct ionic_cq *cq = &qcq->cq;
1184 work_done = ionic_cq_service(cq, budget, cb, cb_arg);
/* Re-evaluate link state from lif->info->status: clear the check-needed
 * flag, and if the up/down state changed, update the cached adapter
 * link_up/link_speed and notify the ethdev layer via
 * ionic_dev_link_update().
 */
1190 ionic_link_status_check(struct ionic_lif *lif)
1192 struct ionic_adapter *adapter = lif->adapter;
1195 lif->state &= ~IONIC_LIF_F_LINK_CHECK_NEEDED;
1200 link_up = (lif->info->status.link_status == IONIC_PORT_OPER_STATUS_UP);
/* No transition — nothing to report. */
1202 if ((link_up && adapter->link_up) ||
1203 (!link_up && !adapter->link_up))
1207 adapter->link_speed =
1208 rte_le_to_cpu_32(lif->info->status.link_speed);
1209 IONIC_PRINT(DEBUG, "Link up - %d Gbps",
1210 adapter->link_speed);
1212 IONIC_PRINT(DEBUG, "Link down");
1215 adapter->link_up = link_up;
1216 ionic_dev_link_update(lif->eth_dev, 0);
/* Handle a surprise firmware stop: set the FW_RESET state flag exactly
 * once (early return if already set) and stop the LIF if it was up.
 */
1220 ionic_lif_handle_fw_down(struct ionic_lif *lif)
1222 if (lif->state & IONIC_LIF_F_FW_RESET)
1225 lif->state |= IONIC_LIF_F_FW_RESET;
1227 if (lif->state & IONIC_LIF_F_UP) {
1229 "Surprise FW stop, stopping %s\n", lif->name);
1230 ionic_lif_stop(lif);
1233 IONIC_PRINT(NOTICE, "FW down, %s stopped", lif->name);
/* CQ-service callback for the notify queue. Ignores stale events (eid not
 * beyond lif->last_eid), then dispatches on the event code:
 *  - LINK_CHANGE: flag the lif so the handler runs a link status check;
 *  - RESET: treat as firmware down;
 *  - anything else: warn.
 */
1237 ionic_notifyq_cb(struct ionic_cq *cq, uint32_t cq_desc_index, void *cb_arg)
1239 union ionic_notifyq_comp *cq_desc_base = cq->base;
1240 union ionic_notifyq_comp *cq_desc = &cq_desc_base[cq_desc_index];
1241 struct ionic_lif *lif = cb_arg;
1243 IONIC_PRINT(DEBUG, "Notifyq callback eid = %jd ecode = %d",
1244 cq_desc->event.eid, cq_desc->event.ecode);
1246 /* Have we run out of new completions to process? */
1247 if (!(cq_desc->event.eid > lif->last_eid))
1250 lif->last_eid = cq_desc->event.eid;
1252 switch (cq_desc->event.ecode) {
1253 case IONIC_EVENT_LINK_CHANGE:
1255 "Notifyq IONIC_EVENT_LINK_CHANGE %s "
1256 "eid=%jd link_status=%d link_speed=%d",
1259 cq_desc->link_change.link_status,
1260 cq_desc->link_change.link_speed);
1262 lif->state |= IONIC_LIF_F_LINK_CHECK_NEEDED;
1265 case IONIC_EVENT_RESET:
1267 "Notifyq IONIC_EVENT_RESET %s "
1268 "eid=%jd, reset_code=%d state=%d",
1271 cq_desc->reset.reset_code,
1272 cq_desc->reset.state);
1273 ionic_lif_handle_fw_down(lif);
1277 IONIC_PRINT(WARNING, "Notifyq bad event ecode=%d eid=%jd",
1278 cq_desc->event.ecode, cq_desc->event.eid);
/* Interrupt-time notify queue processing: mask the interrupt, service
 * completions via ionic_notifyq_cb(), run a deferred link check if one
 * was flagged, return credits, and unmask. Bails out if the queue has
 * not been initialized yet.
 */
1286 ionic_notifyq_handler(struct ionic_lif *lif, int budget)
1288 struct ionic_dev *idev = &lif->adapter->idev;
1289 struct ionic_qcq *qcq = lif->notifyqcq;
1292 if (!(qcq->flags & IONIC_QCQ_F_INITED)) {
1293 IONIC_PRINT(DEBUG, "Notifyq not yet initialized");
1297 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
1298 IONIC_INTR_MASK_SET);
1300 work_done = ionic_qcq_service(qcq, budget, ionic_notifyq_cb, lif);
1302 if (lif->state & IONIC_LIF_F_LINK_CHECK_NEEDED)
1303 ionic_link_status_check(lif);
1305 ionic_intr_credits(idev->intr_ctrl, qcq->intr.index,
1306 work_done, IONIC_INTR_CRED_RESET_COALESCE);
1308 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
1309 IONIC_INTR_MASK_CLEAR);
/* Initialize the admin queue with the device (dev_cmd path, since the
 * adminq itself is not usable yet): issue adminq_init, read back the
 * completion, record the hardware type/index, map the doorbell, and
 * mark the qcq initialized.
 */
1315 ionic_lif_adminq_init(struct ionic_lif *lif)
1317 struct ionic_dev *idev = &lif->adapter->idev;
1318 struct ionic_qcq *qcq = lif->adminqcq;
1319 struct ionic_queue *q = &qcq->q;
1320 struct ionic_q_init_comp comp;
1323 ionic_dev_cmd_adminq_init(idev, qcq);
1324 err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
1328 ionic_dev_cmd_comp(idev, &comp);
1330 q->hw_type = comp.hw_type;
1331 q->hw_index = rte_le_to_cpu_32(comp.hw_index);
1332 q->db = ionic_db_map(lif, q);
1334 IONIC_PRINT(DEBUG, "adminq->hw_type %d", q->hw_type);
1335 IONIC_PRINT(DEBUG, "adminq->hw_index %d", q->hw_index);
1336 IONIC_PRINT(DEBUG, "adminq->db %p", q->db);
1338 qcq->flags |= IONIC_QCQ_F_INITED;
/* Initialize the notify queue via an IONIC_CMD_Q_INIT admin command:
 * IRQ-driven (flags include IONIC_QINIT_F_IRQ), bound to the qcq's
 * interrupt. On success, record the hardware type/index, unmask the
 * interrupt and mark the qcq initialized.
 */
1344 ionic_lif_notifyq_init(struct ionic_lif *lif)
1346 struct ionic_dev *idev = &lif->adapter->idev;
1347 struct ionic_qcq *qcq = lif->notifyqcq;
1348 struct ionic_queue *q = &qcq->q;
1351 struct ionic_admin_ctx ctx = {
1352 .pending_work = true,
1354 .opcode = IONIC_CMD_Q_INIT,
1356 .ver = lif->qtype_info[q->type].version,
1357 .index = rte_cpu_to_le_32(q->index),
1358 .intr_index = rte_cpu_to_le_16(qcq->intr.index),
1359 .flags = rte_cpu_to_le_16(IONIC_QINIT_F_IRQ |
1361 .ring_size = rte_log2_u32(q->num_descs),
1362 .ring_base = rte_cpu_to_le_64(q->base_pa),
1366 IONIC_PRINT(DEBUG, "notifyq_init.index %d", q->index);
1367 IONIC_PRINT(DEBUG, "notifyq_init.ring_base 0x%" PRIx64 "", q->base_pa);
1368 IONIC_PRINT(DEBUG, "notifyq_init.ring_size %d",
1369 ctx.cmd.q_init.ring_size);
1370 IONIC_PRINT(DEBUG, "notifyq_init.ver %u", ctx.cmd.q_init.ver);
1372 err = ionic_adminq_post_wait(lif, &ctx);
1376 q->hw_type = ctx.comp.q_init.hw_type;
1377 q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index);
1380 IONIC_PRINT(DEBUG, "notifyq->hw_type %d", q->hw_type);
1381 IONIC_PRINT(DEBUG, "notifyq->hw_index %d", q->hw_index);
1382 IONIC_PRINT(DEBUG, "notifyq->db %p", q->db);
1384 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
1385 IONIC_INTR_MASK_CLEAR);
1387 qcq->flags |= IONIC_QCQ_F_INITED;
/* Negotiate hardware features: offer lif->features to the NIC via
 * IONIC_CMD_LIF_SETATTR / IONIC_LIF_ATTR_FEATURES, then record the
 * intersection of requested and accepted bits in lif->hw_features and
 * log each enabled feature.
 */
1393 ionic_lif_set_features(struct ionic_lif *lif)
1395 struct ionic_admin_ctx ctx = {
1396 .pending_work = true,
1397 .cmd.lif_setattr = {
1398 .opcode = IONIC_CMD_LIF_SETATTR,
1399 .attr = IONIC_LIF_ATTR_FEATURES,
1400 .features = rte_cpu_to_le_64(lif->features),
1405 err = ionic_adminq_post_wait(lif, &ctx);
1409 lif->hw_features = rte_le_to_cpu_64(ctx.cmd.lif_setattr.features &
1410 ctx.comp.lif_setattr.features);
1412 if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1413 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_TX_TAG");
1414 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1415 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_RX_STRIP");
1416 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1417 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_RX_FILTER");
1418 if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1419 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_HASH");
1420 if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1421 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TX_SG");
1422 if (lif->hw_features & IONIC_ETH_HW_RX_SG)
1423 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_SG");
1424 if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1425 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TX_CSUM");
1426 if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1427 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_CSUM");
1428 if (lif->hw_features & IONIC_ETH_HW_TSO)
1429 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO");
1430 if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1431 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPV6");
1432 if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1433 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_ECN");
1434 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1435 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_GRE");
1436 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1437 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_GRE_CSUM");
1438 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1439 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPXIP4");
1440 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1441 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPXIP6");
1442 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1443 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_UDP");
1444 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1445 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_UDP_CSUM");
/* Initialize a tx queue with the device via IONIC_CMD_Q_INIT:
 * scatter-gather enabled, no interrupt (IONIC_INTR_NONE), ring/cq/sg
 * base addresses from the qcq's DMA region. On success, record the
 * hardware type/index, map the doorbell, and mark the qcq initialized.
 */
1451 ionic_lif_txq_init(struct ionic_qcq *qcq)
1453 struct ionic_queue *q = &qcq->q;
1454 struct ionic_lif *lif = qcq->lif;
1455 struct ionic_cq *cq = &qcq->cq;
1456 struct ionic_admin_ctx ctx = {
1457 .pending_work = true,
1459 .opcode = IONIC_CMD_Q_INIT,
1461 .ver = lif->qtype_info[q->type].version,
1462 .index = rte_cpu_to_le_32(q->index),
1463 .flags = rte_cpu_to_le_16(IONIC_QINIT_F_SG |
1465 .intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
1466 .ring_size = rte_log2_u32(q->num_descs),
1467 .ring_base = rte_cpu_to_le_64(q->base_pa),
1468 .cq_ring_base = rte_cpu_to_le_64(cq->base_pa),
1469 .sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa),
1474 IONIC_PRINT(DEBUG, "txq_init.index %d", q->index);
1475 IONIC_PRINT(DEBUG, "txq_init.ring_base 0x%" PRIx64 "", q->base_pa);
1476 IONIC_PRINT(DEBUG, "txq_init.ring_size %d",
1477 ctx.cmd.q_init.ring_size);
1478 IONIC_PRINT(DEBUG, "txq_init.ver %u", ctx.cmd.q_init.ver);
1480 err = ionic_adminq_post_wait(qcq->lif, &ctx);
1484 q->hw_type = ctx.comp.q_init.hw_type;
1485 q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index);
1486 q->db = ionic_db_map(lif, q);
1488 IONIC_PRINT(DEBUG, "txq->hw_type %d", q->hw_type);
1489 IONIC_PRINT(DEBUG, "txq->hw_index %d", q->hw_index);
1490 IONIC_PRINT(DEBUG, "txq->db %p", q->db);
1492 qcq->flags |= IONIC_QCQ_F_INITED;
/*
 * Initialize one Rx queue pair on the device via a Q_INIT admin command.
 * Mirrors ionic_lif_txq_init(): post the command, then cache the
 * device-assigned hw type/index and map the doorbell.
 * Returns the adminq status (0 on success).
 * NOTE(review): several lines of this routine (braces, error checks) are
 * elided in this view.
 */
1498 ionic_lif_rxq_init(struct ionic_qcq *qcq)
1500 struct ionic_queue *q = &qcq->q;
1501 struct ionic_lif *lif = qcq->lif;
1502 struct ionic_cq *cq = &qcq->cq;
/* Q_INIT request; all multi-byte fields in device (LE) byte order */
1503 struct ionic_admin_ctx ctx = {
1504 .pending_work = true,
1506 .opcode = IONIC_CMD_Q_INIT,
1508 .ver = lif->qtype_info[q->type].version,
1509 .index = rte_cpu_to_le_32(q->index),
/* Scatter-gather enabled; no interrupt bound (polled Rx) */
1510 .flags = rte_cpu_to_le_16(IONIC_QINIT_F_SG |
1512 .intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE),
/* Ring size is encoded as log2 of the descriptor count */
1513 .ring_size = rte_log2_u32(q->num_descs),
1514 .ring_base = rte_cpu_to_le_64(q->base_pa),
1515 .cq_ring_base = rte_cpu_to_le_64(cq->base_pa),
1516 .sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa),
1521 IONIC_PRINT(DEBUG, "rxq_init.index %d", q->index);
1522 IONIC_PRINT(DEBUG, "rxq_init.ring_base 0x%" PRIx64 "", q->base_pa);
1523 IONIC_PRINT(DEBUG, "rxq_init.ring_size %d",
1524 ctx.cmd.q_init.ring_size);
1525 IONIC_PRINT(DEBUG, "rxq_init.ver %u", ctx.cmd.q_init.ver);
/* Blocks until the device completes (or fails) the command */
1527 err = ionic_adminq_post_wait(qcq->lif, &ctx);
/* Completion fields come back in LE; convert before caching */
1531 q->hw_type = ctx.comp.q_init.hw_type;
1532 q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index);
1533 q->db = ionic_db_map(lif, q);
1535 qcq->flags |= IONIC_QCQ_F_INITED;
1537 IONIC_PRINT(DEBUG, "rxq->hw_type %d", q->hw_type);
1538 IONIC_PRINT(DEBUG, "rxq->hw_index %d", q->hw_index);
1539 IONIC_PRINT(DEBUG, "rxq->db %p", q->db);
/*
 * Read the LIF's station MAC address from the device with a
 * LIF_GETATTR/MAC admin command and cache it in lif->mac_addr.
 * Returns the adminq status (0 on success).
 */
1545 ionic_station_set(struct ionic_lif *lif)
1547 struct ionic_admin_ctx ctx = {
1548 .pending_work = true,
1549 .cmd.lif_getattr = {
1550 .opcode = IONIC_CMD_LIF_GETATTR,
1551 .attr = IONIC_LIF_ATTR_MAC,
1558 err = ionic_adminq_post_wait(lif, &ctx);
/* The MAC is a raw byte array; no byte-order conversion is needed */
1562 memcpy(lif->mac_addr, ctx.comp.lif_getattr.mac, RTE_ETHER_ADDR_LEN);
/*
 * Push lif->name to the device with a LIF_SETATTR/NAME admin command.
 * Best-effort: the command status is intentionally ignored.
 */
1568 ionic_lif_set_name(struct ionic_lif *lif)
1570 struct ionic_admin_ctx ctx = {
1571 .pending_work = true,
1572 .cmd.lif_setattr = {
1573 .opcode = IONIC_CMD_LIF_SETATTR,
1574 .attr = IONIC_LIF_ATTR_NAME,
/* Copy at most size-1 bytes; the zero-initialized ctx guarantees the
 * destination name stays NUL-terminated */
1578 memcpy(ctx.cmd.lif_setattr.name, lif->name,
1579 sizeof(ctx.cmd.lif_setattr.name) - 1);
1581 ionic_adminq_post_wait(lif, &ctx);
/*
 * One-time LIF initialization: issue the dev-level LIF_INIT command,
 * bring up the admin and notify queues, set the initial feature set,
 * set up Rx filtering, fetch the station MAC and push the LIF name.
 * On failure, already-initialized pieces are unwound in reverse order
 * via the goto ladder. Returns 0 on success, negative errno on failure.
 * NOTE(review): error-check lines between steps are elided in this view.
 */
1585 ionic_lif_init(struct ionic_lif *lif)
1587 struct ionic_dev *idev = &lif->adapter->idev;
1588 struct ionic_q_init_comp comp;
/* Fresh stats baseline for this init cycle */
1591 memset(&lif->stats_base, 0, sizeof(lif->stats_base));
1593 ionic_dev_cmd_lif_init(idev, lif->info_pa);
1594 err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
1595 ionic_dev_cmd_comp(idev, &comp);
/* NOTE(review): comp.hw_index is device-native little-endian, so the
 * matching conversion would be rte_le_to_cpu_16() (cf. the
 * rte_le_to_cpu_32() used for q->hw_index in the queue-init paths).
 * For 16-bit values both helpers perform the same byte swap, so
 * behavior is unchanged; rename for clarity -- confirm. */
1599 lif->hw_index = rte_cpu_to_le_16(comp.hw_index);
1601 err = ionic_lif_adminq_init(lif);
1605 err = ionic_lif_notifyq_init(lif);
1607 goto err_out_adminq_deinit;
/*
 * Configure initial feature set
 * This will be updated later by the dev_configure() step
 */
1613 lif->features = IONIC_ETH_HW_RX_HASH | IONIC_ETH_HW_VLAN_RX_FILTER;
1615 err = ionic_lif_set_features(lif);
1617 goto err_out_notifyq_deinit;
1619 err = ionic_rx_filters_init(lif);
1621 goto err_out_notifyq_deinit;
1623 err = ionic_station_set(lif);
1625 goto err_out_rx_filter_deinit;
/* Best-effort; no status check */
1627 ionic_lif_set_name(lif);
1629 lif->state |= IONIC_LIF_F_INITED;
/* Unwind in reverse order of initialization */
1633 err_out_rx_filter_deinit:
1634 ionic_rx_filters_deinit(lif);
1636 err_out_notifyq_deinit:
1637 ionic_lif_notifyq_deinit(lif);
1639 err_out_adminq_deinit:
1640 ionic_lif_qcq_deinit(lif->adminqcq);
/*
 * Tear down everything ionic_lif_init() set up, in reverse order.
 * No-op if the LIF was never (or is no longer) initialized.
 */
1646 ionic_lif_deinit(struct ionic_lif *lif)
1648 if (!(lif->state & IONIC_LIF_F_INITED))
1651 ionic_rx_filters_deinit(lif);
1652 ionic_lif_rss_teardown(lif);
1653 ionic_lif_notifyq_deinit(lif);
1654 ionic_lif_qcq_deinit(lif->adminqcq);
1656 lif->state &= ~IONIC_LIF_F_INITED;
1660 ionic_lif_configure_vlan_offload(struct ionic_lif *lif, int mask)
1662 struct rte_eth_dev *eth_dev = lif->eth_dev;
1663 struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode;
1666 * IONIC_ETH_HW_VLAN_RX_FILTER cannot be turned off, so
1667 * set DEV_RX_OFFLOAD_VLAN_FILTER and ignore ETH_VLAN_FILTER_MASK
1669 rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
1671 if (mask & ETH_VLAN_STRIP_MASK) {
1672 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1673 lif->features |= IONIC_ETH_HW_VLAN_RX_STRIP;
1675 lif->features &= ~IONIC_ETH_HW_VLAN_RX_STRIP;
/*
 * Translate the ethdev configuration (queue counts, Rx/Tx offloads)
 * into LIF queue counts and IONIC_ETH_HW_* feature bits. Called from
 * the dev_configure path; lif->features is updated here and pushed to
 * the device later.
 * NOTE(review): several lines (braces, else branches, a debug print
 * argument) are elided in this view.
 */
1680 ionic_lif_configure(struct ionic_lif *lif)
1682 struct rte_eth_rxmode *rxmode = &lif->eth_dev->data->dev_conf.rxmode;
1683 struct rte_eth_txmode *txmode = &lif->eth_dev->data->dev_conf.txmode;
1684 struct ionic_identity *ident = &lif->adapter->ident;
1685 union ionic_lif_config *cfg = &ident->lif.eth.config;
/* Device-reported per-LIF queue limits (LE in the identity block) */
1686 uint32_t ntxqs_per_lif =
1687 rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]);
1688 uint32_t nrxqs_per_lif =
1689 rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]);
/* Queue counts requested by the application via rte_eth_dev_configure */
1690 uint32_t nrxqs = lif->eth_dev->data->nb_rx_queues;
1691 uint32_t ntxqs = lif->eth_dev->data->nb_tx_queues;
1693 lif->port_id = lif->eth_dev->data->port_id;
1695 IONIC_PRINT(DEBUG, "Configuring LIF on port %u",
/* Use the smaller of the device limit and the application's request */
1699 nrxqs_per_lif = RTE_MIN(nrxqs_per_lif, nrxqs);
1702 ntxqs_per_lif = RTE_MIN(ntxqs_per_lif, ntxqs);
1704 lif->nrxqcqs = nrxqs_per_lif;
1705 lif->ntxqcqs = ntxqs_per_lif;
1707 /* Update the LIF configuration based on the eth_dev */
/*
 * NB: While it is true that RSS_HASH is always enabled on ionic,
 * setting this flag unconditionally causes problems in DTS.
 * rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
 */
/* Any requested L3/L4 Rx checksum offload enables HW Rx checksumming */
1717 if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM ||
1718 rxmode->offloads & DEV_RX_OFFLOAD_UDP_CKSUM ||
1719 rxmode->offloads & DEV_RX_OFFLOAD_TCP_CKSUM)
1720 lif->features |= IONIC_ETH_HW_RX_CSUM;
1722 lif->features &= ~IONIC_ETH_HW_RX_CSUM;
/* Rx scatter requires the SG feature and flags the port as scattered */
1724 if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) {
1725 lif->features |= IONIC_ETH_HW_RX_SG;
1726 lif->eth_dev->data->scattered_rx = 1;
1728 lif->features &= ~IONIC_ETH_HW_RX_SG;
1729 lif->eth_dev->data->scattered_rx = 0;
1732 /* Covers VLAN_STRIP */
1733 ionic_lif_configure_vlan_offload(lif, ETH_VLAN_STRIP_MASK);
/* Any requested Tx checksum offload (inner or outer) enables HW Tx csum */
1737 if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
1738 txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
1739 txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM ||
1740 txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
1741 txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
1742 lif->features |= IONIC_ETH_HW_TX_CSUM;
1744 lif->features &= ~IONIC_ETH_HW_TX_CSUM;
1746 if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
1747 lif->features |= IONIC_ETH_HW_VLAN_TX_TAG;
1749 lif->features &= ~IONIC_ETH_HW_VLAN_TX_TAG;
1751 if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
1752 lif->features |= IONIC_ETH_HW_TX_SG;
1754 lif->features &= ~IONIC_ETH_HW_TX_SG;
/* TCP TSO implies the IPv6 and ECN TSO variants as a group */
1756 if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
1757 lif->features |= IONIC_ETH_HW_TSO;
1758 lif->features |= IONIC_ETH_HW_TSO_IPV6;
1759 lif->features |= IONIC_ETH_HW_TSO_ECN;
1761 lif->features &= ~IONIC_ETH_HW_TSO;
1762 lif->features &= ~IONIC_ETH_HW_TSO_IPV6;
1763 lif->features &= ~IONIC_ETH_HW_TSO_ECN;
/*
 * Bring the LIF up: program RSS, set the default Rx mode if none was
 * chosen yet, start all non-deferred Rx and Tx queues, then mark the
 * LIF up and kick a link status check. Returns 0 on success.
 * NOTE(review): error-check lines after the queue starts are elided in
 * this view.
 */
1768 ionic_lif_start(struct ionic_lif *lif)
1774 err = ionic_lif_rss_setup(lif);
/* Only set a default Rx mode if the application has not set one */
1778 if (!lif->rx_mode) {
1779 IONIC_PRINT(DEBUG, "Setting RX mode on %s",
1782 rx_mode = IONIC_RX_MODE_F_UNICAST;
1783 rx_mode |= IONIC_RX_MODE_F_MULTICAST;
1784 rx_mode |= IONIC_RX_MODE_F_BROADCAST;
1786 ionic_set_rx_mode(lif, rx_mode);
1789 IONIC_PRINT(DEBUG, "Starting %u RX queues and %u TX queues "
1791 lif->nrxqcqs, lif->ntxqcqs, lif->port_id);
/* Queues marked DEFERRED are started individually by the application */
1793 for (i = 0; i < lif->nrxqcqs; i++) {
1794 struct ionic_qcq *rxq = lif->rxqcqs[i];
1795 if (!(rxq->flags & IONIC_QCQ_F_DEFERRED)) {
1796 err = ionic_dev_rx_queue_start(lif->eth_dev, i);
1803 for (i = 0; i < lif->ntxqcqs; i++) {
1804 struct ionic_qcq *txq = lif->txqcqs[i];
1805 if (!(txq->flags & IONIC_QCQ_F_DEFERRED)) {
1806 err = ionic_dev_tx_queue_start(lif->eth_dev, i);
1813 /* Carrier ON here */
1814 lif->state |= IONIC_LIF_F_UP;
1816 ionic_link_status_check(lif);
/*
 * Issue LIF_IDENTIFY to the device and copy the identity words out of
 * the dev-cmd data registers into adapter->ident.lif, then log the
 * reported capabilities, filter limits, features and queue counts.
 * Returns the dev-cmd status (0 on success).
 * NOTE(review): the error check after the dev command is elided in this
 * view.
 */
1822 ionic_lif_identify(struct ionic_adapter *adapter)
1824 struct ionic_dev *idev = &adapter->idev;
1825 struct ionic_identity *ident = &adapter->ident;
1826 union ionic_lif_config *cfg = &ident->lif.eth.config;
/* Sizes in 32-bit words of the destination and the dev-cmd data area */
1829 unsigned int lif_words = sizeof(ident->lif.words) /
1830 sizeof(ident->lif.words[0]);
1831 unsigned int cmd_words = sizeof(idev->dev_cmd->data) /
1832 sizeof(idev->dev_cmd->data[0]);
1833 unsigned int nwords;
1835 ionic_dev_cmd_lif_identify(idev, IONIC_LIF_TYPE_CLASSIC,
1836 IONIC_IDENTITY_VERSION_1);
1837 err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT);
/* Copy no more words than either side can hold */
1841 nwords = RTE_MIN(lif_words, cmd_words);
1842 for (i = 0; i < nwords; i++)
1843 ident->lif.words[i] = ioread32(&idev->dev_cmd->data[i]);
/* Identity fields are little-endian; convert for logging */
1845 IONIC_PRINT(INFO, "capabilities 0x%" PRIx64 " ",
1846 rte_le_to_cpu_64(ident->lif.capabilities));
1848 IONIC_PRINT(INFO, "eth.max_ucast_filters 0x%" PRIx32 " ",
1849 rte_le_to_cpu_32(ident->lif.eth.max_ucast_filters));
1850 IONIC_PRINT(INFO, "eth.max_mcast_filters 0x%" PRIx32 " ",
1851 rte_le_to_cpu_32(ident->lif.eth.max_mcast_filters));
1853 IONIC_PRINT(INFO, "eth.features 0x%" PRIx64 " ",
1854 rte_le_to_cpu_64(cfg->features));
1855 IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_ADMINQ] 0x%" PRIx32 " ",
1856 rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_ADMINQ]));
1857 IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] 0x%" PRIx32 " ",
1858 rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_NOTIFYQ]));
1859 IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_RXQ] 0x%" PRIx32 " ",
1860 rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]));
1861 IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_TXQ] 0x%" PRIx32 " ",
1862 rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]));
1868 ionic_lifs_size(struct ionic_adapter *adapter)
1870 struct ionic_identity *ident = &adapter->ident;
1871 union ionic_lif_config *cfg = &ident->lif.eth.config;
1872 uint32_t nintrs, dev_nintrs = rte_le_to_cpu_32(ident->dev.nintrs);
1874 adapter->max_ntxqs_per_lif =
1875 rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]);
1876 adapter->max_nrxqs_per_lif =
1877 rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]);
1879 nintrs = 1 /* notifyq */;
1881 if (nintrs > dev_nintrs) {
1883 "At most %d intr supported, minimum req'd is %u",
1884 dev_nintrs, nintrs);
1888 adapter->nintrs = nintrs;