/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <rte_string_fns.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_alarm.h>
#include <rte_ether.h>

#include "lio_logs.h"
#include "lio_23xx_vf.h"
#include "lio_ethdev.h"
#include "lio_rxtx.h"

int lio_logtype_init;
int lio_logtype_driver;

/* Default RSS key in use */
static uint8_t lio_rss_key[40] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
};

static const struct rte_eth_desc_lim lio_rx_desc_lim = {
	.nb_max = CN23XX_MAX_OQ_DESCRIPTORS,
	.nb_min = CN23XX_MIN_OQ_DESCRIPTORS,
	.nb_align = 1,
};

static const struct rte_eth_desc_lim lio_tx_desc_lim = {
	.nb_max = CN23XX_MAX_IQ_DESCRIPTORS,
	.nb_min = CN23XX_MIN_IQ_DESCRIPTORS,
	.nb_align = 1,
};

/* Wait for control command to reach the NIC. */
static uint16_t
lio_wait_for_ctrl_cmd(struct lio_device *lio_dev,
		      struct lio_dev_ctrl_cmd *ctrl_cmd)
{
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;

	while ((ctrl_cmd->cond == 0) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
		rte_delay_ms(1);
	}

	return !timeout;
}

/**
 * \brief Send Rx control command
 * @param eth_dev Pointer to the structure rte_eth_dev
 * @param start_stop whether to start or stop
 */
static int
lio_send_rx_ctrl_cmd(struct rte_eth_dev *eth_dev, int start_stop)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_RX_CTL;
	ctrl_pkt.ncmd.s.param1 = start_stop;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send RX Control message\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "RX Control command timed out\n");
		return -1;
	}

	return 0;
}

/* Statistics names and their offsets in the stats structure */
struct rte_lio_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint64_t offset;
};

static const struct rte_lio_xstats_name_off rte_lio_stats_strings[] = {
	{"rx_pkts", offsetof(struct octeon_rx_stats, total_rcvd)},
	{"rx_bytes", offsetof(struct octeon_rx_stats, bytes_rcvd)},
	{"rx_broadcast_pkts", offsetof(struct octeon_rx_stats, total_bcst)},
	{"rx_multicast_pkts", offsetof(struct octeon_rx_stats, total_mcst)},
	{"rx_flow_ctrl_pkts", offsetof(struct octeon_rx_stats, ctl_rcvd)},
	{"rx_fifo_err", offsetof(struct octeon_rx_stats, fifo_err)},
	{"rx_dmac_drop", offsetof(struct octeon_rx_stats, dmac_drop)},
	{"rx_fcs_err", offsetof(struct octeon_rx_stats, fcs_err)},
	{"rx_jabber_err", offsetof(struct octeon_rx_stats, jabber_err)},
	{"rx_l2_err", offsetof(struct octeon_rx_stats, l2_err)},
	{"rx_vxlan_pkts", offsetof(struct octeon_rx_stats, fw_rx_vxlan)},
	{"rx_vxlan_err", offsetof(struct octeon_rx_stats, fw_rx_vxlan_err)},
	{"rx_lro_pkts", offsetof(struct octeon_rx_stats, fw_lro_pkts)},
	{"tx_pkts", (offsetof(struct octeon_tx_stats, total_pkts_sent)) +
		sizeof(struct octeon_rx_stats)},
	{"tx_bytes", (offsetof(struct octeon_tx_stats, total_bytes_sent)) +
		sizeof(struct octeon_rx_stats)},
	{"tx_broadcast_pkts",
		(offsetof(struct octeon_tx_stats, bcast_pkts_sent)) +
		sizeof(struct octeon_rx_stats)},
	{"tx_multicast_pkts",
		(offsetof(struct octeon_tx_stats, mcast_pkts_sent)) +
		sizeof(struct octeon_rx_stats)},
	{"tx_flow_ctrl_pkts", (offsetof(struct octeon_tx_stats, ctl_sent)) +
		sizeof(struct octeon_rx_stats)},
	{"tx_fifo_err", (offsetof(struct octeon_tx_stats, fifo_err)) +
		sizeof(struct octeon_rx_stats)},
	{"tx_total_collisions", (offsetof(struct octeon_tx_stats,
					  total_collisions)) +
		sizeof(struct octeon_rx_stats)},
	{"tx_tso", (offsetof(struct octeon_tx_stats, fw_tso)) +
		sizeof(struct octeon_rx_stats)},
	{"tx_vxlan_pkts", (offsetof(struct octeon_tx_stats, fw_tx_vxlan)) +
		sizeof(struct octeon_rx_stats)},
};

#define LIO_NB_XSTATS	RTE_DIM(rte_lio_stats_strings)

/* Get hw stats of the port */
static int
lio_dev_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *xstats,
		   unsigned int n)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	struct octeon_link_stats *hw_stats;
	struct lio_link_stats_resp *resp;
	struct lio_soft_command *sc;
	uint32_t resp_size;
	unsigned int i;
	int retval;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	if (n < LIO_NB_XSTATS)
		return LIO_NB_XSTATS;

	resp_size = sizeof(struct lio_link_stats_resp);
	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
	if (sc == NULL)
		return -ENOMEM;

	resp = (struct lio_link_stats_resp *)sc->virtrptr;
	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
				 LIO_OPCODE_PORT_STATS, 0, 0, 0);

	/* Setting wait time in seconds */
	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;

	retval = lio_send_soft_command(lio_dev, sc);
	if (retval == LIO_IQ_SEND_FAILED) {
		lio_dev_err(lio_dev, "failed to get port stats from firmware. status: %x\n",
			    retval);
		goto get_stats_fail;
	}

	while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
		lio_process_ordered_list(lio_dev);
		rte_delay_ms(1);
	}

	retval = resp->status;
	if (retval) {
		lio_dev_err(lio_dev, "failed to get port stats from firmware\n");
		goto get_stats_fail;
	}

	lio_swap_8B_data((uint64_t *)(&resp->link_stats),
			 sizeof(struct octeon_link_stats) >> 3);

	hw_stats = &resp->link_stats;

	for (i = 0; i < LIO_NB_XSTATS; i++) {
		xstats[i].id = i;
		xstats[i].value =
		    *(uint64_t *)(((char *)hw_stats) +
				  rte_lio_stats_strings[i].offset);
	}

	lio_free_soft_command(sc);

	return LIO_NB_XSTATS;

get_stats_fail:
	lio_free_soft_command(sc);

	return -1;
}

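/* Retrieve names of the extended statistics; entries are emitted in the
 * same order as rte_lio_stats_strings[].
 */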
static int
lio_dev_xstats_get_names(struct rte_eth_dev *eth_dev,
			 struct rte_eth_xstat_name *xstats_names,
			 unsigned limit __rte_unused)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	unsigned int i;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	if (xstats_names == NULL)
		return LIO_NB_XSTATS;

	/* Note: limit checked in rte_eth_xstats_names() */

	for (i = 0; i < LIO_NB_XSTATS; i++) {
		snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
			 "%s", rte_lio_stats_strings[i].name);
	}

	return LIO_NB_XSTATS;
}

/* Reset hw stats for the port */
static void
lio_dev_xstats_reset(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down\n",
			    lio_dev->port_id);
		return;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_CLEAR_STATS;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send clear stats command\n");
		return;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Clear stats command timed out\n");
		return;
	}

	/* clear stored per queue stats */
	RTE_FUNC_PTR_OR_RET(*eth_dev->dev_ops->stats_reset);
	(*eth_dev->dev_ops->stats_reset)(eth_dev);
}

/* Retrieve the device statistics (# packets in/out, # bytes in/out, etc.) */
static int
lio_dev_stats_get(struct rte_eth_dev *eth_dev,
		  struct rte_eth_stats *stats)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_droq_stats *oq_stats;
	struct lio_iq_stats *iq_stats;
	struct lio_instr_queue *txq;
	struct lio_droq *droq;
	int i, iq_no, oq_no;
	uint64_t bytes = 0;
	uint64_t pkts = 0;
	uint64_t drop = 0;

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		iq_no = lio_dev->linfo.txpciq[i].s.q_no;
		txq = lio_dev->instr_queue[iq_no];
		if (txq != NULL) {
			iq_stats = &txq->stats;
			pkts += iq_stats->tx_done;
			drop += iq_stats->tx_dropped;
			bytes += iq_stats->tx_tot_bytes;
		}
	}

	stats->opackets = pkts;
	stats->obytes = bytes;
	stats->oerrors = drop;

	pkts = 0;
	drop = 0;
	bytes = 0;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
		droq = lio_dev->droq[oq_no];
		if (droq != NULL) {
			oq_stats = &droq->stats;
			pkts += oq_stats->rx_pkts_received;
			drop += (oq_stats->rx_dropped +
				 oq_stats->dropped_toomany +
				 oq_stats->dropped_nomem);
			bytes += oq_stats->rx_bytes_received;
		}
	}

	stats->ibytes = bytes;
	stats->ipackets = pkts;
	stats->ierrors = drop;

	return 0;
}

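/* Clear the locally maintained per-queue statistics. Firmware counters are
 * untouched here; lio_dev_xstats_reset() clears those.
 */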
static void
lio_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_droq_stats *oq_stats;
	struct lio_iq_stats *iq_stats;
	struct lio_instr_queue *txq;
	struct lio_droq *droq;
	int i, iq_no, oq_no;

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		iq_no = lio_dev->linfo.txpciq[i].s.q_no;
		txq = lio_dev->instr_queue[iq_no];
		if (txq != NULL) {
			iq_stats = &txq->stats;
			memset(iq_stats, 0, sizeof(struct lio_iq_stats));
		}
	}

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
		droq = lio_dev->droq[oq_no];
		if (droq != NULL) {
			oq_stats = &droq->stats;
			memset(oq_stats, 0, sizeof(struct lio_droq_stats));
		}
	}
}

static void
lio_dev_info_get(struct rte_eth_dev *eth_dev,
		 struct rte_eth_dev_info *devinfo)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	switch (pci_dev->id.subsystem_device_id) {
	/* CN23xx 10G cards */
	case PCI_SUBSYS_DEV_ID_CN2350_210:
	case PCI_SUBSYS_DEV_ID_CN2360_210:
	case PCI_SUBSYS_DEV_ID_CN2350_210SVPN3:
	case PCI_SUBSYS_DEV_ID_CN2360_210SVPN3:
	case PCI_SUBSYS_DEV_ID_CN2350_210SVPT:
	case PCI_SUBSYS_DEV_ID_CN2360_210SVPT:
		devinfo->speed_capa = ETH_LINK_SPEED_10G;
		break;
	/* CN23xx 25G cards */
	case PCI_SUBSYS_DEV_ID_CN2350_225:
	case PCI_SUBSYS_DEV_ID_CN2360_225:
		devinfo->speed_capa = ETH_LINK_SPEED_25G;
		break;
	default:
		devinfo->speed_capa = ETH_LINK_SPEED_10G;
		lio_dev_err(lio_dev,
			    "Unknown CN23XX subsystem device id. Setting 10G as default link speed.\n");
	}

	devinfo->max_rx_queues = lio_dev->max_rx_queues;
	devinfo->max_tx_queues = lio_dev->max_tx_queues;

	devinfo->min_rx_bufsize = LIO_MIN_RX_BUF_SIZE;
	devinfo->max_rx_pktlen = LIO_MAX_RX_PKTLEN;

	devinfo->max_mac_addrs = 1;

	devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM		|
				    DEV_RX_OFFLOAD_UDP_CKSUM		|
				    DEV_RX_OFFLOAD_TCP_CKSUM		|
				    DEV_RX_OFFLOAD_VLAN_STRIP);
	devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM		|
				    DEV_TX_OFFLOAD_UDP_CKSUM		|
				    DEV_TX_OFFLOAD_TCP_CKSUM		|
				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);

	devinfo->rx_desc_lim = lio_rx_desc_lim;
	devinfo->tx_desc_lim = lio_tx_desc_lim;

	devinfo->reta_size = LIO_RSS_MAX_TABLE_SZ;
	devinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ;
	devinfo->flow_type_rss_offloads = (ETH_RSS_IPV4			|
					   ETH_RSS_NONFRAG_IPV4_TCP	|
					   ETH_RSS_IPV6			|
					   ETH_RSS_NONFRAG_IPV6_TCP	|
					   ETH_RSS_IPV6_EX		|
					   ETH_RSS_IPV6_TCP_EX);
}

static int
lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t pf_mtu = lio_dev->linfo.link.s.mtu;
	uint32_t frame_len = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	PMD_INIT_FUNC_TRACE();

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't set MTU\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	/* check if VF MTU is within allowed range.
	 * New value should not exceed PF MTU.
	 */
	if (mtu < RTE_ETHER_MIN_MTU || mtu > pf_mtu) {
		lio_dev_err(lio_dev, "VF MTU should be >= %d and <= %d\n",
			    RTE_ETHER_MIN_MTU, pf_mtu);
		return -EINVAL;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_MTU;
	ctrl_pkt.ncmd.s.param1 = mtu;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send command to change MTU\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Command to change MTU timed out\n");
		return -1;
	}

	if (frame_len > RTE_ETHER_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		eth_dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;

	eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len;
	eth_dev->data->mtu = mtu;

	return 0;
}

static int
lio_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct lio_rss_set *rss_param;
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;
	int i, j, index;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't update reta\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
		lio_dev_err(lio_dev,
			    "The size of hash lookup table configured (%d) doesn't match the number supported by hardware (%d)\n",
			    reta_size, LIO_RSS_MAX_TABLE_SZ);
		return -EINVAL;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
	ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	rss_param->param.flags = 0xF;
	rss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED;
	rss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ;

	for (i = 0; i < (reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
			if ((reta_conf[i].mask) & ((uint64_t)1 << j)) {
				index = (i * RTE_RETA_GROUP_SIZE) + j;
				rss_state->itable[index] = reta_conf[i].reta[j];
			}
		}
	}

	rss_state->itable_size = LIO_RSS_MAX_TABLE_SZ;
	memcpy(rss_param->itable, rss_state->itable, rss_state->itable_size);

	lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to set rss hash\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Set rss hash timed out\n");
		return -1;
	}

	return 0;
}

static int
lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	int i, num;

	if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
		lio_dev_err(lio_dev,
			    "The size of hash lookup table configured (%d) doesn't match the number supported by hardware (%d)\n",
			    reta_size, LIO_RSS_MAX_TABLE_SZ);
		return -EINVAL;
	}

	num = reta_size / RTE_RETA_GROUP_SIZE;

	for (i = 0; i < num; i++) {
		memcpy(reta_conf->reta,
		       &rss_state->itable[i * RTE_RETA_GROUP_SIZE],
		       RTE_RETA_GROUP_SIZE);
		reta_conf++;
	}

	return 0;
}

static int
lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	uint8_t *hash_key = NULL;
	uint64_t rss_hf = 0;

	if (rss_state->hash_disable) {
		lio_dev_info(lio_dev, "RSS disabled in nic\n");
		rss_conf->rss_hf = 0;
		return 0;
	}

	/* Get key value */
	hash_key = rss_conf->rss_key;
	if (hash_key != NULL)
		memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size);

	if (rss_state->ip_hash)
		rss_hf |= ETH_RSS_IPV4;
	if (rss_state->tcp_hash)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
	if (rss_state->ipv6_hash)
		rss_hf |= ETH_RSS_IPV6;
	if (rss_state->ipv6_tcp_hash)
		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
	if (rss_state->ipv6_ex)
		rss_hf |= ETH_RSS_IPV6_EX;
	if (rss_state->ipv6_tcp_ex_hash)
		rss_hf |= ETH_RSS_IPV6_TCP_EX;

	rss_conf->rss_hf = rss_hf;

	return 0;
}

static int
lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct lio_rss_set *rss_param;
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't update hash\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
	ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	rss_param->param.flags = 0xF;

	if (rss_conf->rss_key) {
		rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_KEY_UNCHANGED;
		rss_state->hash_key_size = LIO_RSS_MAX_KEY_SZ;
		rss_param->param.hashkeysize = LIO_RSS_MAX_KEY_SZ;
		memcpy(rss_state->hash_key, rss_conf->rss_key,
		       rss_state->hash_key_size);
		memcpy(rss_param->key, rss_state->hash_key,
		       rss_state->hash_key_size);
	}

	if ((rss_conf->rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
		/* Can't disable rss through hash flags,
		 * if it is enabled by default during init
		 */
		if (!rss_state->hash_disable)
			return -EINVAL;

		/* This is for --disable-rss during testpmd launch */
		rss_param->param.flags |= LIO_RSS_PARAM_DISABLE_RSS;
	} else {
		uint32_t hashinfo = 0;

		/* Can't enable rss if disabled by default during init */
		if (rss_state->hash_disable)
			return -EINVAL;

		if (rss_conf->rss_hf & ETH_RSS_IPV4) {
			hashinfo |= LIO_RSS_HASH_IPV4;
			rss_state->ip_hash = 1;
		} else {
			rss_state->ip_hash = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
			hashinfo |= LIO_RSS_HASH_TCP_IPV4;
			rss_state->tcp_hash = 1;
		} else {
			rss_state->tcp_hash = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_IPV6) {
			hashinfo |= LIO_RSS_HASH_IPV6;
			rss_state->ipv6_hash = 1;
		} else {
			rss_state->ipv6_hash = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
			hashinfo |= LIO_RSS_HASH_TCP_IPV6;
			rss_state->ipv6_tcp_hash = 1;
		} else {
			rss_state->ipv6_tcp_hash = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) {
			hashinfo |= LIO_RSS_HASH_IPV6_EX;
			rss_state->ipv6_ex = 1;
		} else {
			rss_state->ipv6_ex = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) {
			hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX;
			rss_state->ipv6_tcp_ex_hash = 1;
		} else {
			rss_state->ipv6_tcp_ex_hash = 0;
		}

		rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_INFO_UNCHANGED;
		rss_param->param.hashinfo = hashinfo;
	}

	lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to set rss hash\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Set rss hash timed out\n");
		return -1;
	}

	return 0;
}

/**
 * Add VXLAN destination UDP port for an interface.
 *
 * @param eth_dev
 *  Pointer to the structure rte_eth_dev
 * @param udp_tnl
 *  UDP tunnel parameters
 *
 * @return
 *  On success return 0
 *  On failure return -1
 */
static int
lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev,
		       struct rte_eth_udp_tunnel *udp_tnl)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (udp_tnl == NULL)
		return -EINVAL;

	if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
		lio_dev_err(lio_dev, "Unsupported tunnel type\n");
		return -1;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
	ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
	ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_ADD;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_ADD command\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "VXLAN_PORT_ADD command timed out\n");
		return -1;
	}

	return 0;
}

/**
 * Remove VXLAN destination UDP port for an interface.
 *
 * @param eth_dev
 *  Pointer to the structure rte_eth_dev
 * @param udp_tnl
 *  UDP tunnel parameters
 *
 * @return
 *  On success return 0
 *  On failure return -1
 */
static int
lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev,
		       struct rte_eth_udp_tunnel *udp_tnl)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (udp_tnl == NULL)
		return -EINVAL;

	if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
		lio_dev_err(lio_dev, "Unsupported tunnel type\n");
		return -1;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
	ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
	ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_DEL;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_DEL command\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "VXLAN_PORT_DEL command timed out\n");
		return -1;
	}

	return 0;
}

static int
lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (lio_dev->linfo.vlan_is_admin_assigned)
		return -EPERM;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = on ?
			LIO_CMD_ADD_VLAN_FILTER : LIO_CMD_DEL_VLAN_FILTER;
	ctrl_pkt.ncmd.s.param1 = vlan_id;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to %s VLAN port\n",
			    on ? "add" : "remove");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Command to %s VLAN port timed out\n",
			    on ? "add" : "remove");
		return -1;
	}

	return 0;
}

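/* Population count: number of set bits in w, computed with the classic
 * SWAR (SIMD within a register) reduction. Used below to count the queue
 * bits in the firmware iq/oq masks.
 */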
static uint64_t
lio_hweight64(uint64_t w)
{
	uint64_t res = w - ((w >> 1) & 0x5555555555555555ul);

	res =
	    (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
	res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
	res = res + (res >> 8);
	res = res + (res >> 16);

	return (res + (res >> 32)) & 0x00000000000000FFul;
}

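/* Reflect the last link status reported by firmware into the ethdev link
 * structure.
 */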
static int
lio_dev_link_update(struct rte_eth_dev *eth_dev,
		    int wait_to_complete __rte_unused)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct rte_eth_link link;

	/* Initialize */
	memset(&link, 0, sizeof(link));
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = ETH_SPEED_NUM_NONE;
	link.link_duplex = ETH_LINK_HALF_DUPLEX;
	link.link_autoneg = ETH_LINK_AUTONEG;

	/* Return what we found */
	if (lio_dev->linfo.link.s.link_up == 0) {
		/* Interface is down */
		return rte_eth_linkstatus_set(eth_dev, &link);
	}

	link.link_status = ETH_LINK_UP; /* Interface is up */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	switch (lio_dev->linfo.link.s.speed) {
	case LIO_LINK_SPEED_10000:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case LIO_LINK_SPEED_25000:
		link.link_speed = ETH_SPEED_NUM_25G;
		break;
	default:
		link.link_speed = ETH_SPEED_NUM_NONE;
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
	}

	return rte_eth_linkstatus_set(eth_dev, &link);
}

/**
 * \brief Apply the interface flags (promiscuous/allmulticast) on the device
 * @param eth_dev Pointer to the structure rte_eth_dev
 */
static void
lio_change_dev_flag(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	/* Create a ctrl pkt command to be sent to core app. */
	ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_DEVFLAGS;
	ctrl_pkt.ncmd.s.param1 = lio_dev->ifflags;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send change flag message\n");
		return;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
		lio_dev_err(lio_dev, "Change dev flag command timed out\n");
}

static void
lio_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {
		lio_dev_err(lio_dev, "Require firmware version >= %s\n",
			    LIO_VF_TRUST_MIN_VERSION);
		return;
	}

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't enable promiscuous\n",
			    lio_dev->port_id);
		return;
	}

	lio_dev->ifflags |= LIO_IFFLAG_PROMISC;
	lio_change_dev_flag(eth_dev);
}

static void
lio_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {
		lio_dev_err(lio_dev, "Require firmware version >= %s\n",
			    LIO_VF_TRUST_MIN_VERSION);
		return;
	}

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't disable promiscuous\n",
			    lio_dev->port_id);
		return;
	}

	lio_dev->ifflags &= ~LIO_IFFLAG_PROMISC;
	lio_change_dev_flag(eth_dev);
}

static void
lio_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't enable multicast\n",
			    lio_dev->port_id);
		return;
	}

	lio_dev->ifflags |= LIO_IFFLAG_ALLMULTI;
	lio_change_dev_flag(eth_dev);
}

static void
lio_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't disable multicast\n",
			    lio_dev->port_id);
		return;
	}

	lio_dev->ifflags &= ~LIO_IFFLAG_ALLMULTI;
	lio_change_dev_flag(eth_dev);
}

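/* Program the default RSS configuration: hash key, hash protocols and an
 * indirection table that spreads entries round-robin across the configured
 * Rx queues.
 */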
static void
lio_dev_rss_configure(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct rte_eth_rss_reta_entry64 reta_conf[8];
	struct rte_eth_rss_conf rss_conf;
	uint16_t i;

	/* Configure the RSS key and the RSS protocols used to compute
	 * the RSS hash of input packets.
	 */
	rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	if ((rss_conf.rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
		rss_state->hash_disable = 1;
		lio_dev_rss_hash_update(eth_dev, &rss_conf);
		return;
	}

	if (rss_conf.rss_key == NULL)
		rss_conf.rss_key = lio_rss_key; /* Default hash key */

	lio_dev_rss_hash_update(eth_dev, &rss_conf);

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < LIO_RSS_MAX_TABLE_SZ; i++) {
		uint8_t q_idx, conf_idx, reta_idx;

		q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ?
				  i % eth_dev->data->nb_rx_queues : 0);
		conf_idx = i / RTE_RETA_GROUP_SIZE;
		reta_idx = i % RTE_RETA_GROUP_SIZE;
		reta_conf[conf_idx].reta[reta_idx] = q_idx;
		reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx);
	}

	lio_dev_rss_reta_update(eth_dev, reta_conf, LIO_RSS_MAX_TABLE_SZ);
}

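/* Dispatch on the configured mq_mode: enable RSS for ETH_MQ_RX_RSS,
 * disable it otherwise.
 */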
static void
lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct rte_eth_rss_conf rss_conf;

	switch (eth_dev->data->dev_conf.rxmode.mq_mode) {
	case ETH_MQ_RX_RSS:
		lio_dev_rss_configure(eth_dev);
		break;
	case ETH_MQ_RX_NONE:
	/* if mq_mode is none, disable rss mode. */
	default:
		memset(&rss_conf, 0, sizeof(rss_conf));
		rss_state->hash_disable = 1;
		lio_dev_rss_hash_update(eth_dev, &rss_conf);
	}
}

/**
 * Setup our receive queue/ringbuffer. This is the
 * queue the Octeon uses to send us packets and
 * responses. We are given a memory pool for our
 * packet buffers that are used to populate the receive
 * queue.
 *
 * @param eth_dev
 *    Pointer to the structure rte_eth_dev
 * @param q_no
 *    Queue number
 * @param num_rx_descs
 *    Number of entries in the queue
 * @param socket_id
 *    Where to allocate memory
 * @param rx_conf
 *    Pointer to the structure rte_eth_rxconf
 * @param mp
 *    Pointer to the packet pool
 *
 * @return
 *    - On success, return 0
 *    - On failure, return -1
 */
static int
lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		       uint16_t num_rx_descs, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf __rte_unused,
		       struct rte_mempool *mp)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint32_t fw_mapped_oq;
	uint16_t buf_size;

	if (q_no >= lio_dev->nb_rx_queues) {
		lio_dev_err(lio_dev, "Invalid rx queue number %u\n", q_no);
		return -EINVAL;
	}

	lio_dev_dbg(lio_dev, "setting up rx queue %u\n", q_no);

	fw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no;

	/* Free previous allocation if any */
	if (eth_dev->data->rx_queues[q_no] != NULL) {
		lio_dev_rx_queue_release(eth_dev->data->rx_queues[q_no]);
		eth_dev->data->rx_queues[q_no] = NULL;
	}

	mbp_priv = rte_mempool_get_priv(mp);
	buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

	if (lio_setup_droq(lio_dev, fw_mapped_oq, num_rx_descs, buf_size, mp,
			   socket_id)) {
		lio_dev_err(lio_dev, "droq allocation failed\n");
		return -1;
	}

	eth_dev->data->rx_queues[q_no] = lio_dev->droq[fw_mapped_oq];

	return 0;
}

/**
 * Release the receive queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param rxq
 *    Opaque pointer to the receive queue to release
 *
 * @return
 *    - nothing
 */
void
lio_dev_rx_queue_release(void *rxq)
{
	struct lio_droq *droq = rxq;
	int oq_no;

	if (droq) {
		oq_no = droq->q_no;
		lio_delete_droq_queue(droq->lio_dev, oq_no);
	}
}

/**
 * Allocate and initialize SW ring. Initialize associated HW registers.
 *
 * @param eth_dev
 *   Pointer to structure rte_eth_dev
 *
 * @param q_no
 *   Queue number
 *
 * @param num_tx_descs
 *   Number of ringbuffer descriptors
 *
 * @param socket_id
 *   NUMA socket id, used for memory allocations
 *
 * @param tx_conf
 *   Pointer to the structure rte_eth_txconf
 *
 * @return
 *   - On success, return 0
 *   - On failure, return -errno value
 */
static int
lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		       uint16_t num_tx_descs, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	int fw_mapped_iq = lio_dev->linfo.txpciq[q_no].s.q_no;
	int retval;

	if (q_no >= lio_dev->nb_tx_queues) {
		lio_dev_err(lio_dev, "Invalid tx queue number %u\n", q_no);
		return -EINVAL;
	}

	lio_dev_dbg(lio_dev, "setting up tx queue %u\n", q_no);

	/* Free previous allocation if any */
	if (eth_dev->data->tx_queues[q_no] != NULL) {
		lio_dev_tx_queue_release(eth_dev->data->tx_queues[q_no]);
		eth_dev->data->tx_queues[q_no] = NULL;
	}

	retval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no],
			      num_tx_descs, lio_dev, socket_id);
	if (retval) {
		lio_dev_err(lio_dev, "Runtime IQ(TxQ) creation failed.\n");
		return retval;
	}

	retval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq,
				   lio_dev->instr_queue[fw_mapped_iq]->nb_desc,
				   socket_id);
	if (retval) {
		lio_delete_instruction_queue(lio_dev, fw_mapped_iq);
		return retval;
	}

	eth_dev->data->tx_queues[q_no] = lio_dev->instr_queue[fw_mapped_iq];

	return 0;
}

/**
 * Release the transmit queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param txq
 *    Opaque pointer to the transmit queue to release
 *
 * @return
 *    - nothing
 */
void
lio_dev_tx_queue_release(void *txq)
{
	struct lio_instr_queue *tq = txq;
	uint32_t fw_mapped_iq_no;

	if (tq) {
		/* Free sg_list */
		lio_delete_sglist(tq);

		fw_mapped_iq_no = tq->txpciq.s.q_no;
		lio_delete_instruction_queue(tq->lio_dev, fw_mapped_iq_no);
	}
}

/**
 * API to check link state.
 */
static void
lio_dev_get_link_status(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	struct lio_link_status_resp *resp;
	union octeon_link_status *ls;
	struct lio_soft_command *sc;
	uint32_t resp_size;

	if (!lio_dev->intf_open)
		return;

	resp_size = sizeof(struct lio_link_status_resp);
	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
	if (sc == NULL)
		return;

	resp = (struct lio_link_status_resp *)sc->virtrptr;
	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
				 LIO_OPCODE_INFO, 0, 0, 0);

	/* Setting wait time in seconds */
	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;

	if (lio_send_soft_command(lio_dev, sc) == LIO_IQ_SEND_FAILED)
		goto get_status_fail;

	while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
		rte_delay_ms(1);
	}

	if (resp->status)
		goto get_status_fail;

	ls = &resp->link_info.link;

	lio_swap_8B_data((uint64_t *)ls, sizeof(union octeon_link_status) >> 3);

	if (lio_dev->linfo.link.link_status64 != ls->link_status64) {
		if (ls->s.mtu < eth_dev->data->mtu) {
			lio_dev_info(lio_dev, "Lowered VF MTU to %d as PF MTU dropped\n",
				     ls->s.mtu);
			eth_dev->data->mtu = ls->s.mtu;
		}
		lio_dev->linfo.link.link_status64 = ls->link_status64;
		lio_dev_link_update(eth_dev, 0);
	}

	lio_free_soft_command(sc);

	return;

get_status_fail:
	lio_free_soft_command(sc);
}

/* This function will be invoked every LIO_LSC_TIMEOUT (100 ms) and will
 * update the link state if it changes.
 */
static void
lio_sync_link_state_check(void *eth_dev)
{
	struct lio_device *lio_dev =
		(((struct rte_eth_dev *)eth_dev)->data->dev_private);

	if (lio_dev->port_configured)
		lio_dev_get_link_status(eth_dev);

	/* Schedule periodic link status check.
	 * Polling stops when the interface is closed and restarts when it
	 * is opened.
	 */
	if (lio_dev->intf_open)
		rte_eal_alarm_set(LIO_LSC_TIMEOUT, lio_sync_link_state_check,
				  eth_dev);
}

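/* Start the device: enable IO queues, ask firmware to start Rx, set up RSS,
 * begin periodic link-status polling and align the MTU with the configured
 * maximum frame length.
 */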
static int
lio_dev_start(struct rte_eth_dev *eth_dev)
{
	uint16_t mtu;
	uint32_t frame_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	int ret = 0;

	lio_dev_info(lio_dev, "Starting port %d\n", eth_dev->data->port_id);

	if (lio_dev->fn_list.enable_io_queues(lio_dev))
		return -1;

	if (lio_send_rx_ctrl_cmd(eth_dev, 1))
		return -1;

	/* Ready for link status updates */
	lio_dev->intf_open = 1;
	rte_mb();

	/* Configure RSS if device configured with multiple RX queues. */
	lio_dev_mq_rx_configure(eth_dev);

	/* Before updating the link info,
	 * must set linfo.link.link_status64 to 0.
	 */
	lio_dev->linfo.link.link_status64 = 0;

	/* start polling for lsc */
	ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT,
				lio_sync_link_state_check,
				eth_dev);
	if (ret) {
		lio_dev_err(lio_dev,
			    "link state check handler creation failed\n");
		goto dev_lsc_handle_error;
	}

	while ((lio_dev->linfo.link.link_status64 == 0) && (--timeout))
		rte_delay_ms(1);

	if (lio_dev->linfo.link.link_status64 == 0) {
		ret = -1;
		goto dev_mtu_set_error;
	}

	mtu = (uint16_t)(frame_len - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN);
	if (mtu < RTE_ETHER_MIN_MTU)
		mtu = RTE_ETHER_MIN_MTU;

	if (eth_dev->data->mtu != mtu) {
		ret = lio_dev_mtu_set(eth_dev, mtu);
		if (ret)
			goto dev_mtu_set_error;
	}

	return 0;

dev_mtu_set_error:
	rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);

dev_lsc_handle_error:
	lio_dev->intf_open = 0;
	lio_send_rx_ctrl_cmd(eth_dev, 0);

	return ret;
}

/* Stop device and disable input/output functions */
static void
lio_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	lio_dev_info(lio_dev, "Stopping port %d\n", eth_dev->data->port_id);
	lio_dev->intf_open = 0;
	rte_mb();

	/* Cancel callback if still running. */
	rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);

	lio_send_rx_ctrl_cmd(eth_dev, 0);

	lio_wait_for_instr_fetch(lio_dev);

	/* Clear recorded link status */
	lio_dev->linfo.link.link_status64 = 0;
}

static int
lio_dev_set_link_up(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (!lio_dev->intf_open) {
		lio_dev_info(lio_dev, "Port is stopped, start the port first\n");
		return 0;
	}

	if (lio_dev->linfo.link.s.link_up) {
		lio_dev_info(lio_dev, "Link is already UP\n");
		return 0;
	}

	if (lio_send_rx_ctrl_cmd(eth_dev, 1)) {
		lio_dev_err(lio_dev, "Unable to set Link UP\n");
		return -1;
	}

	lio_dev->linfo.link.s.link_up = 1;
	eth_dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}

static int
lio_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (!lio_dev->intf_open) {
		lio_dev_info(lio_dev, "Port is stopped, start the port first\n");
		return 0;
	}

	if (!lio_dev->linfo.link.s.link_up) {
		lio_dev_info(lio_dev, "Link is already DOWN\n");
		return 0;
	}

	lio_dev->linfo.link.s.link_up = 0;
	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

	if (lio_send_rx_ctrl_cmd(eth_dev, 0)) {
		lio_dev->linfo.link.s.link_up = 1;
		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
		lio_dev_err(lio_dev, "Unable to set Link Down\n");
		return -1;
	}

	return 0;
}

/**
 * Reset and stop the device. This occurs on the first
 * call to this routine. Subsequent calls will simply
 * return. NB: This will require the NIC to be rebooted.
 *
 * @param eth_dev
 *    Pointer to the structure rte_eth_dev
 *
 * @return
 *    - nothing
 */
static void
lio_dev_close(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	lio_dev_info(lio_dev, "closing port %d\n", eth_dev->data->port_id);

	if (lio_dev->intf_open)
		lio_dev_stop(eth_dev);

	/* Reset ioq regs */
	lio_dev->fn_list.setup_device_regs(lio_dev);

	if (lio_dev->pci_dev->kdrv == RTE_KDRV_IGB_UIO) {
		cn23xx_vf_ask_pf_to_do_flr(lio_dev);
		rte_delay_ms(LIO_PCI_FLR_WAIT);
	}

	/* lio_free_mbox */
	lio_dev->fn_list.free_mbox(lio_dev);

	/* Free glist resources */
	rte_free(lio_dev->glist_head);
	rte_free(lio_dev->glist_lock);
	lio_dev->glist_head = NULL;
	lio_dev->glist_lock = NULL;

	lio_dev->port_configured = 0;

	/* Delete all queues */
	lio_dev_clear_queues(eth_dev);
}

/**
 * Enable tunnel rx checksum verification from firmware.
 */
static void
lio_enable_hw_tunnel_rx_checksum(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_RX_CSUM_CTL;
	ctrl_pkt.ncmd.s.param1 = LIO_CMD_RXCSUM_ENABLE;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send TNL_RX_CSUM command\n");
		return;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
		lio_dev_err(lio_dev, "TNL_RX_CSUM command timed out\n");
}

/**
 * Enable checksum calculation for inner packet in a tunnel.
 */
static void
lio_enable_hw_tunnel_tx_checksum(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_TX_CSUM_CTL;
	ctrl_pkt.ncmd.s.param1 = LIO_CMD_TXCSUM_ENABLE;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send TNL_TX_CSUM command\n");
		return;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
		lio_dev_err(lio_dev, "TNL_TX_CSUM command timed out\n");
}

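/* Inform firmware about a new Tx/Rx queue count during reconfiguration;
 * requires firmware that supports queue reconfiguration.
 */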
static int
lio_send_queue_count_update(struct rte_eth_dev *eth_dev, int num_txq,
			    int num_rxq)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (strcmp(lio_dev->firmware_version, LIO_Q_RECONF_MIN_VERSION) < 0) {
		lio_dev_err(lio_dev, "Require firmware version >= %s\n",
			    LIO_Q_RECONF_MIN_VERSION);
		return -ENOTSUP;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_QUEUE_COUNT_CTL;
	ctrl_pkt.ncmd.s.param1 = num_txq;
	ctrl_pkt.ncmd.s.param2 = num_rxq;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send queue count control command\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Queue count control command timed out\n");
		return -1;
	}

	return 0;
}

static int
lio_reconf_queues(struct rte_eth_dev *eth_dev, int num_txq, int num_rxq)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (lio_dev->nb_rx_queues != num_rxq ||
	    lio_dev->nb_tx_queues != num_txq) {
		if (lio_send_queue_count_update(eth_dev, num_txq, num_rxq))
			return -1;
		lio_dev->nb_rx_queues = num_rxq;
		lio_dev->nb_tx_queues = num_txq;
	}

	if (lio_dev->intf_open)
		lio_dev_stop(eth_dev);

	/* Reset ioq registers */
	if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
		lio_dev_err(lio_dev, "Failed to configure device registers\n");
		return -1;
	}

	return 0;
}

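/* Port configuration: on a repeat call this just reconfigures the queue
 * counts; on the first call it queries the interface configuration from
 * firmware, maps Tx/Rx queues, reads the permanent MAC address and
 * allocates gather-list bookkeeping.
 */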
static int
lio_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	int retval, num_iqueues, num_oqueues;
	uint8_t mac[RTE_ETHER_ADDR_LEN], i;
	struct lio_if_cfg_resp *resp;
	struct lio_soft_command *sc;
	union lio_if_cfg if_cfg;
	uint32_t resp_size;

	PMD_INIT_FUNC_TRACE();

	/* Inform firmware about change in number of queues to use.
	 * Disable IO queues and reset registers for re-configuration.
	 */
	if (lio_dev->port_configured)
		return lio_reconf_queues(eth_dev,
					 eth_dev->data->nb_tx_queues,
					 eth_dev->data->nb_rx_queues);

	lio_dev->nb_rx_queues = eth_dev->data->nb_rx_queues;
	lio_dev->nb_tx_queues = eth_dev->data->nb_tx_queues;

	/* Set max number of queues which can be re-configured. */
	lio_dev->max_rx_queues = eth_dev->data->nb_rx_queues;
	lio_dev->max_tx_queues = eth_dev->data->nb_tx_queues;

	resp_size = sizeof(struct lio_if_cfg_resp);
	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
	if (sc == NULL)
		return -ENOMEM;

	resp = (struct lio_if_cfg_resp *)sc->virtrptr;

	/* Firmware doesn't have the capability to reconfigure the queues.
	 * Claim all queues, and use as many as required.
	 */
	if_cfg.if_cfg64 = 0;
	if_cfg.s.num_iqueues = lio_dev->nb_tx_queues;
	if_cfg.s.num_oqueues = lio_dev->nb_rx_queues;
	if_cfg.s.base_queue = 0;

	if_cfg.s.gmx_port_id = lio_dev->pf_num;

	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
				 LIO_OPCODE_IF_CFG, 0,
				 if_cfg.if_cfg64, 0);

	/* Setting wait time in seconds */
	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;

	retval = lio_send_soft_command(lio_dev, sc);
	if (retval == LIO_IQ_SEND_FAILED) {
		lio_dev_err(lio_dev, "iq/oq config failed status: %x\n",
			    retval);
		/* Soft instr is freed by driver in case of failure. */
		goto nic_config_fail;
	}

	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed-out.
	 */
	while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
		lio_process_ordered_list(lio_dev);
		rte_delay_ms(1);
	}

	retval = resp->status;
	if (retval) {
		lio_dev_err(lio_dev, "iq/oq config failed\n");
		goto nic_config_fail;
	}

	strlcpy(lio_dev->firmware_version,
		resp->cfg_info.lio_firmware_version, LIO_FW_VERSION_LENGTH);

	lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
			 sizeof(struct octeon_if_cfg_info) >> 3);

	num_iqueues = lio_hweight64(resp->cfg_info.iqmask);
	num_oqueues = lio_hweight64(resp->cfg_info.oqmask);

	if (!(num_iqueues) || !(num_oqueues)) {
		lio_dev_err(lio_dev,
			    "Got bad iqueues (%016lx) or oqueues (%016lx) from firmware.\n",
			    (unsigned long)resp->cfg_info.iqmask,
			    (unsigned long)resp->cfg_info.oqmask);
		goto nic_config_fail;
	}

	lio_dev_dbg(lio_dev,
		    "interface %d, iqmask %016lx, oqmask %016lx, numiqueues %d, numoqueues %d\n",
		    eth_dev->data->port_id,
		    (unsigned long)resp->cfg_info.iqmask,
		    (unsigned long)resp->cfg_info.oqmask,
		    num_iqueues, num_oqueues);

	lio_dev->linfo.num_rxpciq = num_oqueues;
	lio_dev->linfo.num_txpciq = num_iqueues;

	for (i = 0; i < num_oqueues; i++) {
		lio_dev->linfo.rxpciq[i].rxpciq64 =
		    resp->cfg_info.linfo.rxpciq[i].rxpciq64;
		lio_dev_dbg(lio_dev, "index %d OQ %d\n",
			    i, lio_dev->linfo.rxpciq[i].s.q_no);
	}

	for (i = 0; i < num_iqueues; i++) {
		lio_dev->linfo.txpciq[i].txpciq64 =
		    resp->cfg_info.linfo.txpciq[i].txpciq64;
		lio_dev_dbg(lio_dev, "index %d IQ %d\n",
			    i, lio_dev->linfo.txpciq[i].s.q_no);
	}

	lio_dev->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
	lio_dev->linfo.gmxport = resp->cfg_info.linfo.gmxport;
	lio_dev->linfo.link.link_status64 =
			resp->cfg_info.linfo.link.link_status64;

	/* 64-bit swap required on LE machines */
	lio_swap_8B_data(&lio_dev->linfo.hw_addr, 1);
	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		mac[i] = *((uint8_t *)(((uint8_t *)&lio_dev->linfo.hw_addr) +
				       2 + i));

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)mac,
			    &eth_dev->data->mac_addrs[0]);

	/* enable firmware checksum support for tunnel packets */
	lio_enable_hw_tunnel_rx_checksum(eth_dev);
	lio_enable_hw_tunnel_tx_checksum(eth_dev);

	lio_dev->glist_lock =
	    rte_zmalloc(NULL, sizeof(*lio_dev->glist_lock) * num_iqueues, 0);
	if (lio_dev->glist_lock == NULL)
		return -ENOMEM;

	lio_dev->glist_head =
		rte_zmalloc(NULL, sizeof(*lio_dev->glist_head) * num_iqueues,
			    0);
	if (lio_dev->glist_head == NULL) {
		rte_free(lio_dev->glist_lock);
		lio_dev->glist_lock = NULL;
		return -ENOMEM;
	}

	lio_dev_link_update(eth_dev, 0);

	lio_dev->port_configured = 1;

	lio_free_soft_command(sc);

	/* Reset ioq regs */
	lio_dev->fn_list.setup_device_regs(lio_dev);

	/* Free iq_0 used during init */
	lio_free_instr_queue0(lio_dev);

	return 0;

nic_config_fail:
	lio_dev_err(lio_dev, "Failed retval %d\n", retval);
	lio_free_soft_command(sc);
	lio_free_instr_queue0(lio_dev);

	return -1;
}

/* Define our ethernet definitions */
static const struct eth_dev_ops liovf_eth_dev_ops = {
	.dev_configure		= lio_dev_configure,
	.dev_start		= lio_dev_start,
	.dev_stop		= lio_dev_stop,
	.dev_set_link_up	= lio_dev_set_link_up,
	.dev_set_link_down	= lio_dev_set_link_down,
	.dev_close		= lio_dev_close,
	.promiscuous_enable	= lio_dev_promiscuous_enable,
	.promiscuous_disable	= lio_dev_promiscuous_disable,
	.allmulticast_enable	= lio_dev_allmulticast_enable,
	.allmulticast_disable	= lio_dev_allmulticast_disable,
	.link_update		= lio_dev_link_update,
	.stats_get		= lio_dev_stats_get,
	.xstats_get		= lio_dev_xstats_get,
	.xstats_get_names	= lio_dev_xstats_get_names,
	.stats_reset		= lio_dev_stats_reset,
	.xstats_reset		= lio_dev_xstats_reset,
	.dev_infos_get		= lio_dev_info_get,
	.vlan_filter_set	= lio_dev_vlan_filter_set,
	.rx_queue_setup		= lio_dev_rx_queue_setup,
	.rx_queue_release	= lio_dev_rx_queue_release,
	.tx_queue_setup		= lio_dev_tx_queue_setup,
	.tx_queue_release	= lio_dev_tx_queue_release,
	.reta_update		= lio_dev_rss_reta_update,
	.reta_query		= lio_dev_rss_reta_query,
	.rss_hash_conf_get	= lio_dev_rss_hash_conf_get,
	.rss_hash_update	= lio_dev_rss_hash_update,
	.udp_tunnel_port_add	= lio_dev_udp_tunnel_add,
	.udp_tunnel_port_del	= lio_dev_udp_tunnel_del,
	.mtu_set		= lio_dev_mtu_set,
};

static void
lio_check_pf_hs_response(void *lio_dev)
{
	struct lio_device *dev = lio_dev;

	/* check till response arrives */
	if (dev->pfvf_hsword.coproc_tics_per_us)
		return;

	cn23xx_vf_handle_mbox(dev);

	rte_eal_alarm_set(1, lio_check_pf_hs_response, lio_dev);
}

/**
 * \brief Identify the LIO device and map the BAR address space
 * @param lio_dev lio device
 */
static int
lio_chip_specific_setup(struct lio_device *lio_dev)
{
	struct rte_pci_device *pdev = lio_dev->pci_dev;
	uint32_t dev_id = pdev->id.device_id;
	const char *s;
	int ret = 1;

	switch (dev_id) {
	case LIO_CN23XX_VF_VID:
		lio_dev->chip_id = LIO_CN23XX_VF_VID;
		ret = cn23xx_vf_setup_device(lio_dev);
		s = "CN23XX VF";
		break;
	default:
		s = "?";
		lio_dev_err(lio_dev, "Unsupported Chip\n");
	}

	if (!ret)
		lio_dev_info(lio_dev, "DEVICE : %s\n", s);

	return ret;
}

static int
lio_first_time_init(struct lio_device *lio_dev,
		    struct rte_pci_device *pdev)
{
	int dpdk_queues;

	PMD_INIT_FUNC_TRACE();

	/* set dpdk specific pci device pointer */
	lio_dev->pci_dev = pdev;

	/* Identify the LIO type and set device ops */
	if (lio_chip_specific_setup(lio_dev)) {
		lio_dev_err(lio_dev, "Chip specific setup failed\n");
		return -1;
	}

	/* Initialize soft command buffer pool */
	if (lio_setup_sc_buffer_pool(lio_dev)) {
		lio_dev_err(lio_dev, "sc buffer pool allocation failed\n");
		return -1;
	}

	/* Initialize lists to manage the requests of different types that
	 * arrive from applications for this lio device.
	 */
	lio_setup_response_list(lio_dev);

	if (lio_dev->fn_list.setup_mbox(lio_dev)) {
		lio_dev_err(lio_dev, "Mailbox setup failed\n");
		goto error;
	}

	/* Check PF response */
	lio_check_pf_hs_response((void *)lio_dev);

	/* Do handshake and exit if incompatible PF driver */
	if (cn23xx_pfvf_handshake(lio_dev))
		goto error;

	/* Request and wait for device reset. */
	if (pdev->kdrv == RTE_KDRV_IGB_UIO) {
		cn23xx_vf_ask_pf_to_do_flr(lio_dev);
		/* FLR wait time doubled as a precaution. */
		rte_delay_ms(LIO_PCI_FLR_WAIT * 2);
	}

	if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
		lio_dev_err(lio_dev, "Failed to configure device registers\n");
		goto error;
	}

	if (lio_setup_instr_queue0(lio_dev)) {
		lio_dev_err(lio_dev, "Failed to setup instruction queue 0\n");
		goto error;
	}

	dpdk_queues = (int)lio_dev->sriov_info.rings_per_vf;

	lio_dev->max_tx_queues = dpdk_queues;
	lio_dev->max_rx_queues = dpdk_queues;

	/* Enable input and output queues for this device */
	if (lio_dev->fn_list.enable_io_queues(lio_dev))
		goto error;

	return 0;

error:
	lio_free_sc_buffer_pool(lio_dev);
	if (lio_dev->mbox[0])
		lio_dev->fn_list.free_mbox(lio_dev);
	if (lio_dev->instr_queue[0])
		lio_free_instr_queue0(lio_dev);

	return -1;
}

static int
lio_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* lio_free_sc_buffer_pool */
	lio_free_sc_buffer_pool(lio_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return 0;
}

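/* Per-port initialization: set the burst handlers, map BAR0, run first-time
 * device init and register the ethdev operations.
 */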
static int
lio_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	PMD_INIT_FUNC_TRACE();

	eth_dev->rx_pkt_burst = &lio_dev_recv_pkts;
	eth_dev->tx_pkt_burst = &lio_dev_xmit_pkts;

	/* Primary does the initialization. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pdev);

	if (pdev->mem_resource[0].addr) {
		lio_dev->hw_addr = pdev->mem_resource[0].addr;
	} else {
		PMD_INIT_LOG(ERR, "ERROR: Failed to map BAR0\n");
		return -ENODEV;
	}

	lio_dev->eth_dev = eth_dev;
	/* set lio device print string */
	snprintf(lio_dev->dev_string, sizeof(lio_dev->dev_string),
		 "%s[%02x:%02x.%x]", pdev->driver->driver.name,
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);

	lio_dev->port_id = eth_dev->data->port_id;

	if (lio_first_time_init(lio_dev, pdev)) {
		lio_dev_err(lio_dev, "Device init failed\n");
		return -EINVAL;
	}

	eth_dev->dev_ops = &liovf_eth_dev_ops;
	eth_dev->data->mac_addrs = rte_zmalloc("lio", RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		lio_dev_err(lio_dev,
			    "MAC addresses memory allocation failed\n");
		eth_dev->dev_ops = NULL;
		eth_dev->rx_pkt_burst = NULL;
		eth_dev->tx_pkt_burst = NULL;
		return -ENOMEM;
	}

	rte_atomic64_set(&lio_dev->status, LIO_DEV_RUNNING);
	rte_wmb();

	lio_dev->port_configured = 0;
	/* Always allow unicast packets */
	lio_dev->ifflags |= LIO_IFFLAG_UNICAST;

	return 0;
}

static int
lio_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		      struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct lio_device),
					     lio_eth_dev_init);
}

static int
lio_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev,
					      lio_eth_dev_uninit);
}

/* Set of PCI devices this driver supports */
static const struct rte_pci_id pci_id_liovf_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_VF_VID) },
	{ .vendor_id = 0, /* sentinel */ }
};

static struct rte_pci_driver rte_liovf_pmd = {
	.id_table	= pci_id_liovf_map,
	.drv_flags	= RTE_PCI_DRV_NEED_MAPPING,
	.probe		= lio_eth_dev_pci_probe,
	.remove		= lio_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio-pci");

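/* Register the driver log types when the PMD is loaded. */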
RTE_INIT(lio_init_log)
{
	lio_logtype_init = rte_log_register("pmd.net.liquidio.init");
	if (lio_logtype_init >= 0)
		rte_log_set_level(lio_logtype_init, RTE_LOG_NOTICE);
	lio_logtype_driver = rte_log_register("pmd.net.liquidio.driver");
	if (lio_logtype_driver >= 0)
		rte_log_set_level(lio_logtype_driver, RTE_LOG_NOTICE);
}